author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit    975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree      89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/dellemc/unity
parent    Initial commit. (diff)
Adding upstream version 7.7.0+dfsg. (tag: upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/dellemc/unity')
-rw-r--r--  ansible_collections/dellemc/unity/.github/CODEOWNERS  18
-rw-r--r--  ansible_collections/dellemc/unity/.github/Contributers_guide.md  167
-rw-r--r--  ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/ask-a-question.md  11
-rw-r--r--  ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/bug_report.md  36
-rw-r--r--  ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/config.yml  5
-rw-r--r--  ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/feature_request.md  16
-rw-r--r--  ansible_collections/dellemc/unity/.github/pull_request_template.md  27
-rw-r--r--  ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml  123
-rw-r--r--  ansible_collections/dellemc/unity/CHANGELOG.rst  156
-rw-r--r--  ansible_collections/dellemc/unity/FILES.json  817
-rw-r--r--  ansible_collections/dellemc/unity/LICENSE  674
-rw-r--r--  ansible_collections/dellemc/unity/MANIFEST.json  41
-rw-r--r--  ansible_collections/dellemc/unity/MODULE-LICENSE  201
-rw-r--r--  ansible_collections/dellemc/unity/README.md  71
-rw-r--r--  ansible_collections/dellemc/unity/ansible.cfg  484
-rw-r--r--  ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml  102
-rw-r--r--  ansible_collections/dellemc/unity/changelogs/changelog.yaml  159
-rw-r--r--  ansible_collections/dellemc/unity/changelogs/config.yaml  33
-rw-r--r--  ansible_collections/dellemc/unity/docs/ADOPTERS.md  11
-rw-r--r--  ansible_collections/dellemc/unity/docs/BRANCHING.md  32
-rw-r--r--  ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md  137
-rw-r--r--  ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md  49
-rw-r--r--  ansible_collections/dellemc/unity/docs/CONTRIBUTING.md  173
-rw-r--r--  ansible_collections/dellemc/unity/docs/INSTALLATION.md  100
-rw-r--r--  ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md  308
-rw-r--r--  ansible_collections/dellemc/unity/docs/MAINTAINERS.md  18
-rw-r--r--  ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md  38
-rw-r--r--  ansible_collections/dellemc/unity/docs/Release Notes.md  78
-rw-r--r--  ansible_collections/dellemc/unity/docs/SECURITY.md  22
-rw-r--r--  ansible_collections/dellemc/unity/docs/SUPPORT.md  12
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/cifsserver.rst  306
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst  506
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/filesystem.rst  643
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst  341
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/host.rst  333
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/info.rst  582
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/interface.rst  254
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/nasserver.rst  468
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/nfs.rst  626
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/nfsserver.rst  242
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/smbshare.rst  381
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/snapshot.rst  292
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst  421
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/storagepool.rst  361
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/tree_quota.rst  310
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/user_quota.rst  456
-rw-r--r--  ansible_collections/dellemc/unity/docs/modules/volume.rst  381
-rw-r--r--  ansible_collections/dellemc/unity/meta/execution-environment.yml  5
-rw-r--r--  ansible_collections/dellemc/unity/meta/runtime.yml  79
-rw-r--r--  ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py  53
-rw-r--r--  ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py  0
-rw-r--r--  ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py  25
-rw-r--r--  ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py  254
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/cifsserver.py  630
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py  1516
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/filesystem.py  1906
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py  769
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/host.py  1026
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/info.py  1784
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/interface.py  531
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/nasserver.py  1142
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/nfs.py  1873
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/nfsserver.py  494
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/smbshare.py  877
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/snapshot.py  751
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py  1002
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/storagepool.py  879
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/tree_quota.py  706
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/user_quota.py  1012
-rw-r--r--  ansible_collections/dellemc/unity/plugins/modules/volume.py  1277
-rw-r--r--  ansible_collections/dellemc/unity/requirements.txt  3
-rw-r--r--  ansible_collections/dellemc/unity/requirements.yml  3
-rw-r--r--  ansible_collections/dellemc/unity/tests/requirements.txt  7
-rw-r--r--  ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt  33
-rw-r--r--  ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt  27
-rw-r--r--  ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt  27
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py  19
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py  200
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py  122
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py  68
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py  154
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py  122
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py  64
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py  187
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py  259
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py  32
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py  168
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py  174
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py  169
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py  193
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py  94
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py  143
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py  350
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py  112
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py  183
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py  225
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py  132
-rw-r--r--  ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_volume.py  128
98 files changed, 33011 insertions, 0 deletions
diff --git a/ansible_collections/dellemc/unity/.github/CODEOWNERS b/ansible_collections/dellemc/unity/.github/CODEOWNERS
new file mode 100644
index 000000000..f783d12bf
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/CODEOWNERS
@@ -0,0 +1,18 @@
+# CODEOWNERS
+#
+# documentation for this file can be found at:
+# https://help.github.com/en/articles/about-code-owners
+
+# These are the default owners for the code and will
+# be requested for review when someone opens a pull request.
+# order is alphabetical for easier maintenance.
+#
+# Ananthu Kuttattu (kuttattz)
+# Bhavneet Sharma (Bhavneet-Sharma)
+# Jennifer John (Jennifer-John)
+# Meenakshi Dembi (meenakshidembi691)
+# Pavan Mudunuri (Pavan-Mudunuri)
+# Trisha Datta (trisha-dell)
+
+# for all files:
+* @kuttattz @Bhavneet-Sharma @Jennifer-John @meenakshidembi691 @Pavan-Mudunuri @trisha-dell
diff --git a/ansible_collections/dellemc/unity/.github/Contributers_guide.md b/ansible_collections/dellemc/unity/.github/Contributers_guide.md
new file mode 100644
index 000000000..dbca6f7ac
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/Contributers_guide.md
@@ -0,0 +1,167 @@
+**Open Source Contributor's guide**
+
+# **Contributors**
+Contributors are individuals who contribute to an open source project on GitHub in order to learn, teach, and gain experience. Contributors have the ability to:
+
+Create issues
+
+Create new feature requests
+
+Contribute to fixing issues via pull request
+
+Help triage issues - although changing labels is restricted, contributors can help identify duplicates and provide upvotes on issues.
+
+Contributors must fork the repository and create a branch from main, which will contain the code changes related to an issue.
+
+# Branching
+Repositories will use a scaled trunk branching strategy (short-lived branches) in combination with feature flags to allow frequent changes to the main branch without breaking the build with partial features.
+
+Short-lived branches will be created for parts of a feature or a feature in its entirety (depending on the size of the feature)
+
+The main branch is always releasable, meaning code pushed to main is fully tested and reviewed to ensure it works and does not introduce any regressions
+
+Maintainers can create branches directly off the main branch. Contributors must fork the repository and create a branch from main.
+
+# Branch Naming
+
+
+|**Branch Type**|**Example**|**Comment**|
+| :- | :- | :- |
+|main|main||
+|Release|release-1.0|hotfix: release-1.1; patch: release-1.0.1|
+|Feature|feature-9-olp-support|"9" referring to GitHub issue ID|
+|Bug Fix|bugfix-110-remove-docker-compose|"110" referring to GitHub issue ID|
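+
+For illustration, a contributor picking up GitHub issue 9 might set up a branch following these conventions (a minimal sketch; the fork URL is a placeholder for your own fork of the repository):
+
+```shell
+# Clone your fork of the repository (created via the GitHub UI)
+git clone https://github.com/<your-username>/ansible-unity.git
+cd ansible-unity
+
+# Branch from an up-to-date main, using the feature naming convention
+git checkout main
+git pull origin main
+git checkout -b feature-9-olp-support
+```
+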
+## **Release Branches**
+A release branch is a branch created from main that will be solely used to release a new version. A release branch follows these rules:
+
+Maintainers are the only ones who can create a release branch
+
+The branch is named according to the branch naming conventions listed above
+
+No changes are made directly on the release branch. If there are any critical defects, the fixing commits can be cherry-picked from the main branch into the release branch
+
+Only critical bug fixes will be merged into this branch.
+
+Release branches are never deleted
+
+Release branches are created 10 days prior to a release to allow time for longevity testing to be executed
+
+A release can contain features and/or partial features from future releases that will be labelled as "experimental features". Features designated for future releases are continually worked on while also being merged into the main branch. These features will be optionally enabled during deployment using feature flags, if the user chooses to use them. This gives users early exposure to features so they can use and test them, providing valuable feedback.
+## **Bug Fix Branches**
+A bug fix branch is a branch which is created for the purpose of fixing a GitHub bug issue.
+
+Maintainers can create bug fix branches from the main branch
+
+Contributors can create bug fix branches after they have forked the repository
+
+Bug fix branches are merged directly into the main branch once a pull request has been approved.
+
+Bug fix branches are deleted once they have been merged to the main branch.
+
+## **Feature Branches**
+A feature branch is created for code changes related to a feature. This can either be dedicated to a full feature, if it is small, or to part of a feature. The goal is to deliver feature value incrementally into the main branch, so short-lived branches delivering partial but working feature functionality are preferred.
+
+Maintainers can create feature branches from the main branch
+
+Feature branches are merged directly into main once a pull request has been reviewed.
+
+Feature branches are deleted once they have been merged to the main branch.
+
+## **The Main Branch**
+The main branch is where bug fixes and continuous feature value are delivered. The main branch is ALWAYS releasable, meaning code pushed to the main branch is fully tested and reviewed to ensure it works and does not introduce any regressions. When changes are merged into the main branch, a new "main" image will be created and pushed to Docker Hub. This provides continuous access to the latest features and fixes being worked on, so users can try them and provide valuable feedback.
+
+If a feature or partial feature branch is being merged to main, it must contain documentation updates covering any new feature flags that have been added or existing feature flags that have been changed. This provides the user with information on how to enable/disable an experimental feature.
+
+
+|**Feature Stage**|**Description**|
+| :- | :- |
+|Alpha|<p>Disabled by default</p><p>May contain only parts of a feature</p><p>Potentially buggy; enabling the feature may expose bugs</p><p>Support for the feature may be dropped at any time without notice</p>|
+|Beta|<p>Enabled by default</p><p>The entire feature has been implemented</p><p>The feature is well tested, and enabling it is considered safe</p><p>Support for the feature will not be dropped</p>|
+|GA|<p>The feature is always enabled and cannot be disabled via a feature flag; the feature flag has been removed</p><p>Considered stable and will appear in many subsequent versions</p>|
+|Deprecated|<p>Indicates the feature will be deprecated as of a certain version</p><p>When the version in which it is to be deprecated is released, the feature is disabled and cannot be enabled</p>|
+
+# Example Scenarios
+### **New Partial Feature Branch Merged to Main**
+Let's consider a case where we are working on a partial feature branch for a new feature after 1.0.0 has been released.
+
+The branch contains a fully functional and tested part of the feature that is enabled/disabled by a new feature flag.
+
+The branch contains an update to the feature flag documentation that has an entry for the feature flag.
+
+The feature flag is initially set with the Alpha stage
+
+The first branch merged to the main branch for a particular feature must also contain a change to the feature flags document with the following set of information. In the example below we see a new feature flag in the Alpha stage that is disabled by default. The "Since" value represents the version that the feature (flag) was originally released in. If the feature (flag) has not been released yet, but is available in the main branch, we can use something like "N/A" as a value.
+
+
+|**Feature**|**Default**|**Stage**|**Since**|**Until**|
+| :-: | :- | :- | :- | :- |
+|new-feature|false|Alpha|N/A||
+
+### **New Release with Partial Feature Destined for Future Release**
+Let's consider a case where a release branch is created for a 1.1.0 release. Given the previous example, the release branch will contain the partially developed "new-feature", which is disabled by default. In this case, the feature flag document would be updated to reflect the correct version in which "new-feature" became available. If we know the release plan for "new-feature" going forward, we can also provide those details so users are aware. The example below illustrates the release plan for "new-feature" moving from Alpha to GA.
+
+
+|**Feature**|**Default**|**Stage**|**Since**|**Until**|
+| :-: | :- | :- | :- | :- |
+|new-feature|false|Alpha|1.1.0|1.1.0|
+|new-feature|true|Beta|1.2.0|1.3.0|
+|new-feature|true|GA|1.4.0||
+
+When a feature (flag) reaches the GA stage, all entries of the feature flag in the document must be removed. The feature becomes baked into the product at that point and is always enabled.
+
+### **Deprecating a Feature (Flag)**
+Let's consider a case where a feature (flag) currently in the Beta stage needs to be deprecated. We would announce that as part of the feature flag documentation. Given that the current release is 1.2.0 and "new-feature" is enabled by default, we want to indicate that "new-feature" will become deprecated in version 1.4.0. The following example shows how this is done.
+
+
+|**Feature**|**Default**|**Stage**|**Since**|**Until**|
+| :- | :- | :- | :- | :- |
+|new-feature|true|Beta|1.2.0|1.3.0|
+|new-feature||Deprecated|1.4.0||
+
+This tells the user that the future release 1.4.0 will no longer contain "new-feature".
+
+
+# **Pull Requests**
+Once development on a branch is complete, a pull request is created in accordance with the contributing guide found in the appropriate GitHub repository. A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it.
+
+To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines:
+
+Title and description match the implementation.
+
+Commits within the pull request follow the formatting guidelines provided in the contributing guide
+
+The pull request can close, but may not always close, a linked issue - partial feature request pull requests should not close a GitHub feature issue.
+
+The pull request contains the necessary tests that verify the intended behavior.
+
+Your branch must be up to date with the main branch before merging.
+
+If the pull request fixes a bug:
+
+The pull request description must include Fixes #<issue number>.
+
+To avoid regressions, the pull request should include tests that replicate the fixed bug.
+
+When a pull request is merged, all commits are squashed into a single one. It is encouraged to write informative commit messages, as they become part of the Git commit body.
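+
+For example, a squash commit for a hypothetical bug fix might be written like this (the message and issue number are illustrative, reusing the bugfix-110 example above):
+
+```shell
+git commit -m "Remove docker-compose from the test environment" \
+           -m "The compose file was unused and broke local test runs. Fixes #110"
+```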
+
+The following pull request template is currently in use and should be used for other open source projects.
+
+
+## **GitHub Actions**
+The following GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirements for any code that gets checked in. If any of the checks fail, it is expected that the contributor will look into the check log, understand the problem, and resolve the issue. The maintainer is also responsible for reaching out to contributors when failed checks are noticed in order to help resolve them.
+
+### GitHub Repositories
+The following GitHub actions are being used to enforce quality gates on GitHub repositories. No branch can be merged unless the following checks pass.
+
+
+|**GitHub Action**|**Description**|
+| :- | :- |
+|Ansible Sanity tests|Inspects Ansible Sanity|
+|[Malware Scanner](https://github.com/dell/common-github-actions/tree/main/malware-scanner)|Inspects source code for malware|
+|[Code Sanitizer](https://github.com/dell/common-github-actions/tree/main/code-sanitizer)|Analyzes source code for forbidden words and text, such as non-inclusive language|
+|[Test Coverage](https://github.com/dell/common-github-actions/tree/main/go-code-tester)|Runs Ansible unit tests and checks that code coverage per package meets a configured threshold (90%). Flags an error if a pull request does not meet the test coverage threshold and blocks the merge of the pull request.|
+|E2E tests|Captures the results of the maintainer-triggered E2E tests executed against the pull request. The check will fail if the tests do not pass.|
+
+**Ansible Code of Conduct:**
+
+Every community can be strengthened by a diverse variety of viewpoints, insights, opinions, skillsets, and skill levels. However, with diversity comes the potential for disagreement and miscommunication. The purpose of this Code of Conduct is to ensure that disagreements and differences of opinion are conducted respectfully and on their own merits, without personal attacks or other behavior that might create an unsafe or unwelcoming environment.
+For more details, refer to the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
diff --git a/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/ask-a-question.md b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/ask-a-question.md
new file mode 100644
index 000000000..5c762f01f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/ask-a-question.md
@@ -0,0 +1,11 @@
+---
+name: Ask a question
+about: Ask a question.
+title: "[QUESTION]:"
+labels: type/question
+assignees: ''
+
+---
+### How can the Team help you today?
+
+**Details: ?**
diff --git a/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/bug_report.md b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..9f1d9428f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,36 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[BUG]:"
+labels: type/bug, needs-triage
+assignees: ''
+
+---
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Step 1 ...
+2. Step 2 ...
+3. Step 3 ...
+...
+n. Step n: See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Logs**
+If applicable, submit logs or stack traces from the affected services.
+
+**System Information (please complete the following information):**
+ - OS/Version: [e.g. RHEL 7.6]
+ - Ansible Version: [e.g. 2.12]
+ - Python Version: [e.g. 3.9]
+ - Additional Information...
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..6224280ea
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Ansible mailing list
+ alias: ansible.team@dell.com
+ about: Please ask and answer usage questions and report security issues here.
diff --git a/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..db6a70942
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,16 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[FEATURE]:"
+labels: type/feature-request, needs-triage
+assignees: ''
+
+---
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/ansible_collections/dellemc/unity/.github/pull_request_template.md b/ansible_collections/dellemc/unity/.github/pull_request_template.md
new file mode 100644
index 000000000..c87953f4a
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/pull_request_template.md
@@ -0,0 +1,27 @@
+# Description
+A few sentences describing the overall goals of the pull request's commits.
+
+# GitHub Issues
+List the GitHub issues impacted by this PR:
+
+| GitHub Issue # |
+| -------------- |
+| |
+
+# Checklist:
+
+- [ ] I have performed a self-review of my own code to ensure there are no formatting, pep8, linting, or security issues
+- [ ] I have performed Ansible Sanity test using --docker default
+- [ ] I have verified that new and existing unit tests pass locally with my changes
+- [ ] I have not allowed coverage numbers to degrade
+- [ ] I have maintained at least 90% code coverage
+- [ ] I have commented my code, particularly in hard-to-understand areas
+- [ ] I have made corresponding changes to the documentation
+- [ ] I have added tests that prove my fix is effective or that my feature works
+- [ ] Backward compatibility is not broken
+
+# How Has This Been Tested?
+Please describe the tests that you ran to verify your changes. Please also list any relevant details for your test configuration.
+
+- [ ] Test A
+- [ ] Test B
diff --git a/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml
new file mode 100644
index 000000000..d9fb5cf04
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml
@@ -0,0 +1,123 @@
+name: CI
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ schedule:
+ - cron: '0 3 * * *'
+
+jobs:
+ build:
+ name: Build collection
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible-version: [stable-2.12]
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v1
+ with:
+ python-version: 3.9
+
+ - name: Install ansible (${{ matrix.ansible-version }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+
+ - name: Build a collection tarball
+ run: ansible-galaxy collection build --output-path "${GITHUB_WORKSPACE}/.cache/collection-tarballs"
+
+ - name: Store migrated collection artifacts
+ uses: actions/upload-artifact@v1
+ with:
+ name: collection
+ path: .cache/collection-tarballs
+
+ ###
+ # Unit tests (OPTIONAL)
+ #
+ # https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html
+
+ unit:
+ name: Unit Tests
+ needs: [build]
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.9", "3.10", "3.11"]
+ ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+ exclude:
+ # Python 3.11 is supported only from ansible-core 2.14 onwards
+ - python-version: "3.11"
+ ansible-version: stable-2.12
+ - python-version: "3.11"
+ ansible-version: stable-2.13
+
+ steps:
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install ansible (${{ matrix.ansible-version }}) version
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+
+ - name: Download migrated collection artifacts
+ uses: actions/download-artifact@v1
+ with:
+ name: collection
+ path: .cache/collection-tarballs
+
+ - name: Setup Unit test Pre-requisites
+ run: |
+ ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
+ if [ -f /home/runner/.ansible/collections/ansible_collections/dellemc/unity/tests/requirements.txt ]; then pip install -r /home/runner/.ansible/collections/ansible_collections/dellemc/unity/tests/requirements.txt; fi
+ - name: Run Unit tests using ansible-test
+ run: ansible-test units -v --color --python ${{ matrix.python-version }} --coverage
+ working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/unity
+
+ ###
+ # Sanity tests (REQUIRED)
+ #
+ # https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html
+
+ sanity:
+ name: Sanity Tests
+ runs-on: ubuntu-latest
+ needs: [build]
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+
+ steps:
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v1
+ with:
+        # it is sufficient to run this once, as "ansible-test sanity" in the docker image
+        # will run against all python versions it supports.
+ python-version: 3.9
+
+ - name: Install ansible (${{ matrix.ansible-version }}) version
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+
+ - name: Download migrated collection artifacts
+ uses: actions/download-artifact@v1
+ with:
+ name: collection
+ path: .cache/collection-tarballs
+
+ - name: Setup Sanity test Pre-requisites
+ run: ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
+
+ # run ansible-test sanity inside of Docker.
+ # The docker container has all the pinned dependencies that are required
+ # and all python versions ansible supports.
+ - name: Run sanity tests
+ run: ansible-test sanity --docker -v --color
+ working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/unity
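+
+# The jobs above can be reproduced locally before opening a pull request.
+# A minimal sketch using the same commands as the workflow steps (the sanity
+# run assumes Docker is available on the host):
+#
+#   pip install https://github.com/ansible/ansible/archive/stable-2.14.tar.gz
+#   ansible-galaxy collection build --output-path /tmp/collection-tarballs
+#   ansible-galaxy collection install /tmp/collection-tarballs/*.tar.gz
+#   cd ~/.ansible/collections/ansible_collections/dellemc/unity
+#   ansible-test units -v --color --python 3.9 --coverage
+#   ansible-test sanity --docker -v --color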
diff --git a/ansible_collections/dellemc/unity/CHANGELOG.rst b/ansible_collections/dellemc/unity/CHANGELOG.rst
new file mode 100644
index 000000000..55eb73ca8
--- /dev/null
+++ b/ansible_collections/dellemc/unity/CHANGELOG.rst
@@ -0,0 +1,156 @@
+===========================
+Dellemc.Unity Change Log
+===========================
+
+.. contents:: Topics
+
+
+v1.6.0
+======
+
+Minor Changes
+-------------
+
+- Support addition of host from the Host List to NFS Export in nfs module.
+- Support enable/disable advanced dedup in volume module.
+- Add synchronous replication support for filesystem.
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- Updated modules to adhere with ansible community guidelines.
+
+v1.4.1
+======
+
+Minor Changes
+-------------
+
+- Updated the execution environment related files.
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- Added cifsserver module to support create, list and delete CIFS server.
+- Added execution environment manifest file to support building an execution environment with ansible-builder.
+- Added interface module to support create, list and delete interface.
+- Added nfsserver module to support create, list and delete NFS server.
+- Check mode is supported for Info.
+- Enhance nfs module to support advanced host management option.
+- Enhanced filesystem module to support create, modify and delete of filesystem replication.
+- Enhanced info module to list cifs server, nfs servers, ethernet port and file interface.
+- Enhanced nas server module to support create, modify and delete of nas server replication.
+
+New Modules
+-----------
+
+- dellemc.unity.cifsserver - Manage CIFS server on Unity storage system
+- dellemc.unity.interface - Manage Interfaces on Unity storage system
+- dellemc.unity.nfsserver - Manage NFS server on Unity storage system
+
+v1.3.0
+======
+
+Minor Changes
+-------------
+
+- Added rotating file handler for logging.
+- Bugfix in volume module to retrieve details of non-thin volumes.
+- Enhance host module to support add/remove network address to/from a host.
+- Enhanced Info module to list disk groups.
+- Enhanced Storage Pool module to support listing of drive details of a pool
+- Enhanced Storage pool module to support creation of storage pool
+- Enhanced consistency group module to support enable/disable replication in consistency group
+- Enhanced host module to support both mapping and un-mapping of non-logged-in initiators to host.
+- Enhanced host module to support listing of network addresses, FC initiators, ISCSI initiators and allocated volumes of a host
+- Removed dellemc.unity prefix from module names.
+- Renamed gatherfacts module to info module
+
+v1.2.1
+======
+
+Minor Changes
+-------------
+
+- Added dual licensing
+- Documentation updates
+- Fixed typo in galaxy.yml
+- Updated few samples in modules
+
+v1.2.0
+======
+
+Minor Changes
+-------------
+
+- Added CRUD operations support for Quota tree.
+- Added CRUD operations support for User Quota on Filesystem/Quota tree.
+- Added support for Application tagging.
+- Consistency group module is enhanced to map/unmap hosts to/from a new or existing consistency group.
+- Filesystem module is enhanced to associate/dissociate snapshot schedule to/from a Filesystem.
+- Filesystem module is enhanced to update default quota configuration during create operation.
+- Gather facts module is enhanced to list User Quota and Quota tree components.
+- Volume module is enhanced to support map/unmap multiple hosts from a volume.
+
+New Modules
+-----------
+
+- dellemc.unity.tree_quota - Manage quota tree on the Unity storage system
+- dellemc.unity.user_quota - Manage user quota on the Unity storage system
+
+v1.1.0
+======
+
+Minor Changes
+-------------
+
+- Added CRUD operations support for Filesystem snapshot.
+- Added CRUD operations support for Filesystem.
+- Added CRUD operations support for NFS export.
+- Added CRUD operations support for SMB share.
+- Added support to get/modify operations on NAS server.
+- Gather facts module is enhanced to list Filesystem snapshots, NAS servers, File systems, NFS exports, SMB shares.
+
+New Modules
+-----------
+
+- dellemc.unity.filesystem - Manage filesystem on Unity storage system
+- dellemc.unity.filesystem_snapshot - Manage filesystem snapshot on the Unity storage system
+- dellemc.unity.nasserver - Manage NAS servers on Unity storage system
+- dellemc.unity.nfs - Manage NFS export on Unity storage system
+- dellemc.unity.smbshare - Manage SMB shares on Unity storage system
+
+v1.0.0
+======
+
+Major Changes
+-------------
+
+- Added CRUD operations support for Consistency group.
+- Added CRUD operations support for Volume.
+- Added CRUD operations support for a snapshot schedule.
+- Added support for CRUD operations on a host with FC/iSCSI initiators.
+- Added support for CRUD operations on a snapshot of a volume.
+- Added support for adding/removing volumes to/from a consistency group.
+- Added support to add/remove FC/iSCSI initiators to/from a host.
+- Added support to create a snapshot for a consistency group.
+- Added support to get/modify operations on storage pool.
+- Added support to map/unmap a host to/from a snapshot.
+- Gather facts module is enhanced to list volumes, consistency groups, FC initiators, iSCSI initiators, hosts, snapshot schedules.
+
+New Modules
+-----------
+
+- dellemc.unity.consistencygroup - Manage consistency groups on Unity storage system
+- dellemc.unity.host - Manage Host operations on Unity
+- dellemc.unity.info - Gathering information about Unity
+- dellemc.unity.snapshot - Manage snapshots on the Unity storage system
+- dellemc.unity.snapshotschedule - Manage snapshot schedules on Unity storage system
+- dellemc.unity.storagepool - Manage storage pool on Unity
+- dellemc.unity.volume - Manage volume on Unity storage system
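+
+To try the modules listed above once the collection is published to Ansible
+Galaxy, the standard tooling can be used (a minimal sketch; assumes
+ansible-core is installed on the control node)::
+
+    ansible-galaxy collection install dellemc.unity
+    ansible-doc dellemc.unity.volume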
diff --git a/ansible_collections/dellemc/unity/FILES.json b/ansible_collections/dellemc/unity/FILES.json
new file mode 100644
index 000000000..74f5f6106
--- /dev/null
+++ b/ansible_collections/dellemc/unity/FILES.json
@@ -0,0 +1,817 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/CODEOWNERS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02e53b61090f135ec71115de45f5be3ec18cf5ebe90c17eeb41e4a15a0fa5df5",
+ "format": 1
+ },
+ {
+ "name": ".github/Contributers_guide.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b489058fec0d5daedd1fd9217c9c2728ce5bec06d24a3b69bb7f7a421120436c",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/ask-a-question.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ebe3e2a8aeb7d3254cd3f60fad416448b5958aca051782a543c67a197fc5aa7",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18219339f0847334abfbf50897e1d040b24a36e89ce00d170901b56f66c0dec5",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "047b3829a06d012dea33c49b95f566712915a3382147bab113ca80ff627bb606",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f0230119029f94df098f49a92dfedb69a8009405a8a6275bbf15701b7b78a7b",
+ "format": 1
+ },
+ {
+ "name": ".github/pull_request_template.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97fbe691e21c019419fe894b4b059805a13fcf27a3ffe62a131ec9276f9dff48",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36061bc14911cf2e9f7a4bd7c15771f40cc19c6153de788a79b02fe39d776684",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1de2b419f19d4fc6298d49bcc811ead97876e67da345da4569ae7d68c5ec2e72",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b",
+ "format": 1
+ },
+ {
+ "name": "MODULE-LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a787189adc44f92c54d358fc05673e8a96815159f8b8296e5eed187171089bc5",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bf36094cdc0a4661da6b69615073ece39cece910d58dbdd889c4e99070fbbd0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7fd9dd55038169d69b139a961a0501c0805bc1a86a046750c76f4a94b366c19",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ad32bcb3927f6676bb3f1501ad872b55c87129d2266412051e57847a65508b1",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/ADOPTERS.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c81933a41503275063d789f0685472e1603e4614376f3918b42c4bfb210a2c01",
+ "format": 1
+ },
+ {
+ "name": "docs/BRANCHING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae716ec9cdebdf9f24166ba485eba474de335db37bcf51e50d65dad5a6fdde85",
+ "format": 1
+ },
+ {
+ "name": "docs/CODE_OF_CONDUCT.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d47343b6cae5e147a16cc1c312461d175f29d89c8c6094e024d6cb885f5cc36",
+ "format": 1
+ },
+ {
+ "name": "docs/COMMITTER_GUIDE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b41b38fe09cfcbfb4499c39ed4822a9f8c3f5d562e68dad45b5f2389f18053b5",
+ "format": 1
+ },
+ {
+ "name": "docs/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "598e9bdfdfb5fcbcba74e866d2680c80e75e8c3e9567c4706df9de36660b1490",
+ "format": 1
+ },
+ {
+ "name": "docs/INSTALLATION.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6fb197119f7cb5a83d6079ea4abe69c8c44a29f1686909ed08cc0bf05b67f2d",
+ "format": 1
+ },
+ {
+ "name": "docs/ISSUE_TRIAGE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "846a696c85036bd2e65dc8517932ec8dbf88c305726a230fdcc4a75e989a2d53",
+ "format": 1
+ },
+ {
+ "name": "docs/MAINTAINERS.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0758ca62988538fa1b83403308bfac6637b523dd20e769b889a2c54561ae1512",
+ "format": 1
+ },
+ {
+ "name": "docs/MAINTAINER_GUIDE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1e926f5ecbcb3c016e8394fd386a22d0a9235cd6e049b84a9dce42990c60fa8",
+ "format": 1
+ },
+ {
+ "name": "docs/Release Notes.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25fc0f22b54c1ecc5fb94cf6f0546600aa444416f79a57c8d367adbd83c1e565",
+ "format": 1
+ },
+ {
+ "name": "docs/SECURITY.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b80e365d12066e0f6c0cf7dce905b1530fc827b4c8111f6533910982ab196174",
+ "format": 1
+ },
+ {
+ "name": "docs/SUPPORT.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc16b1e742969c2c208f6ea60211b637878f6278212f2a123b333b6448288733",
+ "format": 1
+ },
+ {
+ "name": "docs/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/modules/cifsserver.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ade3ef8ba26c63eeff0dd582a8f9b8a2616d96cb7066bc84410e6ed4c0c02fc5",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/consistencygroup.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a0ac0e3028289436bc9dcc07e14821fab118552e56e28558541346628b73fab",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/filesystem.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f93698d8d35931abff7bb6262ac10e3603aa300878248001d3566d1ec39c0ee2",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/filesystem_snapshot.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0974b3beba09ff4a44a6977be12764377213f960a0a0c08e2be3fc4a025b4583",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/host.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b78755a1bdbec795874457f3ee5e63a49d60fb21653617fb58fbefe6f8144282",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19af0ad15ef490c6520d3001657e49b31c386e745d7cf61ac88898bf363ea50d",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/interface.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14bec74112975514f622216f2b2f272197b96f0cb6f99e41ade9491af5bda29e",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/nasserver.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0433167e81af69381b7bb7e5350d1414ba6a86de1b66ef82b964d180a9d17229",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/nfs.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a1471c26c1b8144f2c159069276b4a7e0d2eb34ab5fb82def30ac9a0c224ef9",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/nfsserver.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95005625b57eeb149e047a58cc5f8cd395d48bdf4ac533ab37af7c92ef6c0b01",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/smbshare.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ae4d6ac050fe829100c4844d6663de2c60f68bd16a18942466c4fa879aaffb5",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/snapshot.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f92f063ad273c68ddc443dc638d4d9bbef928796f8be56ff8ba3257edf222fee",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/snapshotschedule.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bef45c4ee0716258b1afd49736cd566ee6f0f7332e533c2d8e438ede0a70fd55",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/storagepool.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb19a36818f64d71bf2ba8836568d36dce82b2d4a112f7d516e4b7a9d7e46b18",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/tree_quota.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68e0ed5f7d7fd816a58711b36faef61aed44c23b67624d0c669e04a058329cf2",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/user_quota.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e973112cf06e41e27c465d0e375045e6f0bfc6954a5b12325ff189c92885b9b",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/volume.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "206b027c28b5a0e7c4e40d6402ca45e0c6cc67c8623821c3077bf3b00a54dd13",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fecf89b56560e79560dba9f860201828a8df82323d02c3a6a4e5fbfaa0aed3a",
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ad0fa1a5cceaac69ea46df66d57fe6f290544c8efa6fabd2a2982296e428536",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/unity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59baa9afd0063cf6ccc64d0be2f93bcdac944d2d53d9a9808654d5715173eab2",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/logging_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a940654af74df1d2ed5d35691ee46a542190227e414d87d68c471c3210202750",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fec20103cba14b247e99885a64d5f447876743d1a2c1aabfa41344fa3b5811a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cifsserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad663a0af0d1f0dc86ea15772d49cb79813e5970abeadc5fa88ff0fbb798f1c9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consistencygroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84a261b6260c02d87c16108a949a7a9861c837d8b90630059d159418986a2167",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea31747f729d9e933d6f10ceee77c69a8fe924a9b8b55ea52eabd65a4c48e69c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/filesystem_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d01454f787b460865970a4a0607590874a8ac974b03e10fc4a336ae9ed97522",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c84b7702c1aa417739ac3a3e6e5102ee5a0489b71f481bd5b33d80d73ed01ba0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f19b0105b22885b546964e9f8316c837cc0ddb0a91d94a07f84317a9358eedd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "492628928153228d7934856ec4861b169540aa2cba74c89d493705ce243b3661",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nasserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe7701ebc60a48151cc72b7463cf8bf3c73c31adb1dded7e487ab49054f95112",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93f0a525a5f4a4da9e3d51526e97539a7d2929baf68d4f3b048ec1ea63b79528",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nfsserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40554df77af25ca89f7cc4757fd45d8493a269d136e1818f5c2fc3584372de1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/smbshare.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b40e8869df87faefa535902c72a32c68f650ae77f5a0ad9941d12e2c97dbbb1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8029c7c788f29e0079f78b3c5ded6194aab6fa32e8e63491c43415a543cfecc5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snapshotschedule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "046673dba8971fc40b4ccdce39de288b6e9ba7f1a18963f3cd486ab0d24e9dd6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storagepool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59696ca229b897b7f546c4e332143d4591aaf4eea3df2d3e808cbff0204f1d7c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/tree_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ada935019e3b1e4fc61cae3c4134fbf1481058fb8245be836000850ad56e7009",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/user_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "228c82f62dca70b8c4ab613c6c935ed022a44c014b2d3752e9cd2856e876707b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d4064bc0a20e43ba0c45110563315f481220d485af3a2289fc8a4786f8b3814",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea96c62d419724d448e0ed0b2099f5242e6a9cc26abca64844f1ed99c082e844",
+ "format": 1
+ },
+ {
+ "name": "requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60355bcbb93016c5bff4bc3a5d1eb99d26cd74d0da681517e2a20545b71501d4",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e85c8aca7d338809e87300cf372956e1b4850ece6bc475f6fa4c66ca405812a",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb03c71bf8838c2b75394a5100397d843b8741a8d3814446f928c7ddaa265ffd",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb03c71bf8838c2b75394a5100397d843b8741a8d3814446f928c7ddaa265ffd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_api_exception.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7a0fa0bda40ed6b6bbd4193bb68a8c8402864506aa9031f655fc102ca49bfb6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_cifsserver_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b864ceef5c5db21f3282a76094c8d9226186dadebf9851432fff4fb57e59cfad",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_consistencygroup_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6df144fe81cfe2a4d59f7ff5c4d0f22925b0c9b9d6fadf56fee25f3b046efe47",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_filesystem_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "171c7ad10cbe32e041c124a91fc1c34fa6c80585da66afe7938c6133031e8202",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_host_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d7e6f5d137f6977389d84485ddffd09099d13b756256a8190b506b86089db1a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_interface_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c8de82b6b7311a2ec191fc510d0bc06cde5e0627f74e83711de69c629e826fc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_nasserver_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5f8e78abd2a70ecb4d42b4384b872f2075c9bc91bdca683a42787a0f5ce9851",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_nfs_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3601f6396ddf5e35dd3e80de3d78df967ef4c972cfa895ac00e0f95bde1bcad6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_nfsserver_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03b34a46b5c55696c4741dbd3749b1a654cf401e29e79443908145ef87ff5994",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_sdk_response.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0813900fa591ec50510b41bfca7960791baf5ed9f08c2b41a09483b7011c21b4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_storagepool_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb7fd67ca3924ba62b179b93f879ff07f16a1ada51742c12cd947e48e0aad26f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_volume_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80d7788c208356919f39c49c924745263eb96806ca95ce60a1030822455ed48e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_cifsserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7031be0fee19af368f61e6f07404c941ec64139f066514eeca48a5f3c9224749",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_consistencygroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "684935db745c549cae4c337b85fede00ab0f95dea56c129c158abfb006cba901",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3befdfe527cfd705aeca63979e52d7439bf3bdc8ccac246b1f2b600d10100456",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc6464fdcbc66df43da1ef26cbf64d86b77406f28ded032dc7c7c4da6034cd0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10476e71c1542a43c01b1c9622dbc54b32ae3512fd1cd3ecd2dbec61b06b373b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_nasserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09ef2b39687911942250b40ba350920a496e00dc864f05b8b46d2a79958769b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_nfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6db29526aaea6d815254195fc9e0e80b130950d9dec6ef4b6a60884239d02af2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_nfsserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72726040ab5aff9e023872018e3abd192dba24acc41be7137d3f98ae7712c444",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_storagepool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63dafda1a4e630b23a637affabacfefd5c14a3231a0fae02d3886b0bf7656525",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bc48d2969cfaa5670ab538ba51cef532e1c3177004e2a2d6dbbd2cd7b4e7714",
+ "format": 1
+ },
+ {
+ "name": "ansible.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5360ab997ea2c7ed8a6efc7e8324e7b6ec7479af057fe15ff23fe885f05b58b2",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/LICENSE b/ansible_collections/dellemc/unity/LICENSE
new file mode 100644
index 000000000..e72bfddab
--- /dev/null
+++ b/ansible_collections/dellemc/unity/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>. \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/MANIFEST.json b/ansible_collections/dellemc/unity/MANIFEST.json
new file mode 100644
index 000000000..bff7c8f2f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/MANIFEST.json
@@ -0,0 +1,41 @@
+{
+ "collection_info": {
+ "namespace": "dellemc",
+ "name": "unity",
+ "version": "1.6.0",
+ "authors": [
+ "Akash Shendge <ansible.team@dell.com>",
+ "Ambuj Dubey <ansible.team@dell.com>",
+ "Arindam Datta <ansible.team@dell.com>",
+ "P Srinivas Rao <ansible.team@dell.com>",
+ "Rajshree Khare <ansible.team@dell.com>",
+ "Vivek Soni <ansible.team@dell.com>",
+ "Spandita Panigrahi <ansible.team@dell.com>",
+ "Ananthu S Kuttattu <ansible.team@dell.com>",
+ "Pavan Mudunuri <ansible.team@dell.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "storage"
+ ],
+ "description": "Ansible modules for Unity",
+ "license": [
+ "GPL-3.0-or-later",
+ "Apache-2.0"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/dell/ansible-unity/tree/1.6.0",
+ "documentation": "https://github.com/dell/ansible-unity/tree/1.6.0/docs",
+ "homepage": "https://github.com/dell/ansible-unity/tree/1.6.0",
+ "issues": "https://www.dell.com/community/Automation/bd-p/Automation"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de87855f156b345c7d278a09b66679818c6ddcf5544a868ec8bd68dc4c2d5162",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/MODULE-LICENSE b/ansible_collections/dellemc/unity/MODULE-LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/ansible_collections/dellemc/unity/MODULE-LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ansible_collections/dellemc/unity/README.md b/ansible_collections/dellemc/unity/README.md
new file mode 100644
index 000000000..58dbb9539
--- /dev/null
+++ b/ansible_collections/dellemc/unity/README.md
@@ -0,0 +1,71 @@
+# Ansible Modules for Dell Technologies Unity
+
+The Ansible Modules for Dell Technologies (Dell) Unity allow data center and IT administrators to use Red Hat Ansible to automate and orchestrate the configuration and management of Dell Unity arrays.
+
+The Ansible modules manage consistency groups, filesystems, filesystem snapshots, CIFS servers, NAS servers, NFS servers, NFS exports, SMB shares, interfaces, hosts, snapshots, snapshot schedules, storage pools, user quotas, quota trees and volumes, and can also gather facts from the array. The options available for each are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in YAML syntax. The modules are written so that all operations are idempotent: making multiple identical requests has the same effect as making a single request.
+
+## Table of contents
+
+* [Code of conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md)
+* [Maintainer guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/MAINTAINER_GUIDE.md)
+* [Committer guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/COMMITTER_GUIDE.md)
+* [Contributing guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md)
+* [Branching strategy](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md)
+* [List of adopters](https://github.com/dell/ansible-unity/blob/1.6.0/docs/ADOPTERS.md)
+* [Maintainers](https://github.com/dell/ansible-unity/blob/1.6.0/docs/MAINTAINERS.md)
+* [Support](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SUPPORT.md)
+* [License](#license)
+* [Security](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SECURITY.md)
+* [Prerequisites](#prerequisites)
+* [List of Ansible modules for Dell Unity](#list-of-ansible-modules-for-dell-unity)
+* [Installation and execution of Ansible modules for Dell Unity](#installation-and-execution-of-ansible-modules-for-dell-unity)
+* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation)
+
+## License
+The Ansible collection for Unity is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-unity/blob/1.6.0/LICENSE) for the full terms. Ansible modules and module utilities that are part of the Ansible collection for Unity are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-unity/blob/1.6.0/MODULE-LICENSE) for the full terms.
+
+## Supported Platforms
+ * Dell Unity arrays, versions 5.1 and 5.2
+
+## Prerequisites
+This table provides information about the software prerequisites for the Ansible Modules for Dell Unity.
+
+| **Ansible Modules** | **Python version** | **Storops - Python SDK version** | **Ansible** |
+|---------------------|--------------------|----------------------------------|-------------|
+| v1.6.0 | 3.9 <br> 3.10 <br> 3.11 | 1.2.11 | 2.12 <br> 2.13 <br> 2.14|
+
+## Idempotency
+The modules are written so that all requests are idempotent and hence fault-tolerant: the result of a successfully performed request is independent of the number of times it is executed.
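+
+A minimal sketch of this behavior follows, assuming the volume module's documented parameters; the Unisphere host, credentials and pool name are placeholders. Running the play a second time reports no change.
+
+```yaml
+- name: Idempotent volume creation (sketch)
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Create volume "ansible_vol"; a re-run of this task makes no change
+      dellemc.unity.volume:
+        unispherehost: "unisphere.example.com"  # placeholder management host
+        username: "user"                        # placeholder credentials
+        password: "password"
+        validate_certs: false
+        vol_name: "ansible_vol"
+        pool_name: "pool_1"                     # assumes this pool already exists
+        size: 2
+        cap_unit: "GB"
+        state: "present"
+```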
+
+## List of Ansible Modules for Dell Unity
+ * [Consistency group module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/consistencygroup.rst)
+ * [Filesystem module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/filesystem.rst)
+ * [Filesystem snapshot module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/filesystem_snapshot.rst)
+ * [Info module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/info.rst)
+ * [Host module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/host.rst)
+ * [CIFS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/cifsserver.rst)
+ * [NAS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nasserver.rst)
+ * [NFS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nfsserver.rst)
+ * [NFS export module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nfs.rst)
+ * [SMB share module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/smbshare.rst)
+ * [Interface module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/interface.rst)
+ * [Snapshot module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/snapshot.rst)
+ * [Snapshot schedule module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/snapshotschedule.rst)
+ * [Storage pool module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/storagepool.rst)
+ * [User quota module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/user_quota.rst)
+ * [Quota tree module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/tree_quota.rst)
+ * [Volume module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/volume.rst)
+
+## Installation and execution of Ansible modules for Dell Unity
+
+The installation and execution steps of Ansible modules for Dell Unity can be found [here](https://github.com/dell/ansible-unity/blob/1.6.0/docs/INSTALLATION.md).
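+
+As a quick reference, the collection can be installed from Ansible Galaxy with `ansible-galaxy collection install dellemc.unity`, or pinned through a requirements file; the sketch below assumes this 1.6.0 release is the desired version.
+
+```yaml
+# collections/requirements.yml
+# Install with: ansible-galaxy collection install -r collections/requirements.yml
+collections:
+  - name: dellemc.unity
+    version: 1.6.0
+```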
+
+## Releasing, Maintenance and Deprecation
+
+Ansible Modules for Dell Technologies Unity follows [Semantic Versioning](https://semver.org/).
+
+New versions are released regularly when significant changes (bug fixes or new features) are made to the collection.
+
+Released code versions are located on "release" branches with names of the form "release-x.y.z", where x.y.z corresponds to the version number. More information on the branching strategy can be found [here](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md).
+
+Ansible Modules for Dell Technologies Unity deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
diff --git a/ansible_collections/dellemc/unity/ansible.cfg b/ansible_collections/dellemc/unity/ansible.cfg
new file mode 100644
index 000000000..c10d1da22
--- /dev/null
+++ b/ansible_collections/dellemc/unity/ansible.cfg
@@ -0,0 +1,484 @@
+# config file for ansible -- https://ansible.com/
+# ===============================================
+
+# nearly all parameters can be overridden in ansible-playbook
+# or with command line flags. ansible will read ANSIBLE_CONFIG,
+# ansible.cfg in the current working directory, .ansible.cfg in
+# the home directory or /etc/ansible/ansible.cfg, whichever it
+# finds first
+
+[defaults]
+
+# some basic default values...
+
+#inventory = /etc/ansible/hosts
+#library = /usr/share/my_modules/
+#module_utils = /usr/share/my_module_utils/
+#remote_tmp = ~/.ansible/tmp
+#local_tmp = ~/.ansible/tmp
+#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
+#forks = 5
+#poll_interval = 15
+#sudo_user = root
+#ask_sudo_pass = True
+#ask_pass = True
+#transport = smart
+#remote_port = 22
+#module_lang = C
+#module_set_locale = False
+
+# plays will gather facts by default, which contain information about
+# the remote system.
+#
+# smart - gather by default, but don't regather if already gathered
+# implicit - gather by default, turn off with gather_facts: False
+# explicit - do not gather by default, must say gather_facts: True
+#gathering = implicit
+
+# This only affects the gathering done by a play's gather_facts directive,
+# by default gathering retrieves all facts subsets
+# all - gather all subsets
+# network - gather min and network facts
+# hardware - gather hardware facts (longest facts to retrieve)
+# virtual - gather min and virtual facts
+# facter - import facts from facter
+# ohai - import facts from ohai
+# You can combine them using comma (ex: network,virtual)
+# You can negate them using ! (ex: !hardware,!facter,!ohai)
+# A minimal set of facts is always gathered.
+#gather_subset = all
+
+# some hardware related facts are collected
+# with a maximum timeout of 10 seconds. This
+# option lets you increase or decrease that
+# timeout to something more suitable for the
+# environment.
+# gather_timeout = 10
+
+# additional paths to search for roles in, colon separated
+#roles_path = /etc/ansible/roles
+
+# uncomment this to disable SSH key host checking
+#host_key_checking = False
+
+# change the default callback, you can only have one 'stdout' type enabled at a time.
+#stdout_callback = skippy
+
+
+## Ansible ships with some plugins that require whitelisting,
+## this is done to avoid running all of a type by default.
+## These settings list those that you want enabled for your system.
+## Custom plugins should not need this unless plugin author specifies it.
+
+# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
+#callback_whitelist = timer, mail
+
+# Determine whether includes in tasks and handlers are "static" by
+# default. As of 2.0, includes are dynamic by default. Setting these
+# values to True will make includes behave more like they did in the
+# 1.x versions.
+#task_includes_static = False
+#handler_includes_static = False
+
+# Controls if a missing handler for a notification event is an error or a warning
+#error_on_missing_handler = True
+
+# change this for alternative sudo implementations
+#sudo_exe = sudo
+
+# What flags to pass to sudo
+# WARNING: leaving out the defaults might create unexpected behaviours
+#sudo_flags = -H -S -n
+
+# SSH timeout
+#timeout = 10
+
+# default user to use for playbooks if user is not specified
+# (/usr/bin/ansible will use current user as default)
+#remote_user = root
+
+# logging is off by default unless this path is defined
+# if so defined, consider logrotate
+#log_path = /var/log/ansible.log
+
+# default module name for /usr/bin/ansible
+#module_name = command
+
+# use this shell for commands executed under sudo
+# you may need to change this to bin/bash in rare instances
+# if sudo is constrained
+#executable = /bin/sh
+
+# if inventory variables overlap, does the higher precedence one win
+# or are hash values merged together? The default is 'replace' but
+# this can also be set to 'merge'.
+#hash_behaviour = replace
+
+# by default, variables from roles will be visible in the global variable
+# scope. To prevent this, the following option can be enabled, and only
+# tasks and handlers within the role will see the variables there
+#private_role_vars = yes
+
+# list any Jinja2 extensions to enable here:
+#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
+
+# if set, always use this private key file for authentication, same as
+# if passing --private-key to ansible or ansible-playbook
+#private_key_file = /path/to/file
+
+# If set, configures the path to the Vault password file as an alternative to
+# specifying --vault-password-file on the command line.
+#vault_password_file = /path/to/vault_password_file
+
+# format of string {{ ansible_managed }} available within Jinja2
+# templates; indicates to users editing template files that the files will be replaced,
+# substituting {file}, {host} and {uid} and strftime codes with proper values.
+#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
+# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
+# in some situations so the default is a static string:
+#ansible_managed = Ansible managed
+
+# by default, ansible-playbook will display "Skipping [host]" if it determines a task
+# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
+# messages. NOTE: the task header will still be shown regardless of whether or not the
+# task is skipped.
+#display_skipped_hosts = True
+
+# by default, if a task in a playbook does not include a name: field then
+# ansible-playbook will construct a header that includes the task's action but
+# not the task's args. This is a security feature because ansible cannot know
+# if the *module* considers an argument to be no_log at the time that the
+# header is printed. If your environment doesn't have a problem securing
+# stdout from ansible-playbook (or you have manually specified no_log in your
+# playbook on all of the tasks where you have secret information) then you can
+# safely set this to True to get more informative messages.
+#display_args_to_stdout = False
+
+# by default (as of 1.3), Ansible will raise errors when attempting to dereference
+# Jinja2 variables that are not set in templates or action lines. Uncomment this line
+# to revert the behavior to pre-1.3.
+#error_on_undefined_vars = False
+
+# by default (as of 1.6), Ansible may display warnings based on the configuration of the
+# system running ansible itself. This may include warnings about 3rd party packages or
+# other conditions that should be resolved if possible.
+# to disable these warnings, set the following value to False:
+#system_warnings = True
+
+# by default (as of 1.4), Ansible may display deprecation warnings for language
+# features that should no longer be used and will be removed in future versions.
+# to disable these warnings, set the following value to False:
+#deprecation_warnings = True
+
+# (as of 1.8), Ansible can optionally warn when usage of the shell and
+# command module appear to be simplified by using a default Ansible module
+# instead. These warnings can be silenced by adjusting the following
+# setting or adding warn=yes or warn=no to the end of the command line
+# parameter string. This will for example suggest using the git module
+# instead of shelling out to the git command.
+# command_warnings = False
+
+
+# set plugin path directories here, separate with colons
+#action_plugins = /usr/share/ansible/plugins/action
+#cache_plugins = /usr/share/ansible/plugins/cache
+#callback_plugins = /usr/share/ansible/plugins/callback
+#connection_plugins = /usr/share/ansible/plugins/connection
+#lookup_plugins = /usr/share/ansible/plugins/lookup
+#inventory_plugins = /usr/share/ansible/plugins/inventory
+#vars_plugins = /usr/share/ansible/plugins/vars
+#filter_plugins = /usr/share/ansible/plugins/filter
+#test_plugins = /usr/share/ansible/plugins/test
+#terminal_plugins = /usr/share/ansible/plugins/terminal
+#strategy_plugins = /usr/share/ansible/plugins/strategy
+
+
+# by default, ansible will use the 'linear' strategy but you may want to try
+# another one
+#strategy = free
+
+# by default callbacks are not loaded for /bin/ansible, enable this if you
+# want, for example, a notification or logging callback to also apply to
+# /bin/ansible runs
+#bin_ansible_callbacks = False
+
+
+# don't like cows? that's unfortunate.
+# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
+#nocows = 1
+
+# set which cowsay stencil you'd like to use by default. When set to 'random',
+# a random stencil will be selected for each task. The selection will be filtered
+# against the `cow_whitelist` option below.
+#cow_selection = default
+#cow_selection = random
+
+# when using the 'random' option for cowsay, stencils will be restricted to this list.
+# it should be formatted as a comma-separated list with no spaces between names.
+# NOTE: line continuations here are for formatting purposes only, as the INI parser
+# in python does not support them.
+#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
+# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
+# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
+
+# don't like colors either?
+# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
+#nocolor = 1
+
+# if set to a persistent type (not 'memory', for example 'redis') fact values
+# from previous runs in Ansible will be stored. This may be useful when
+# wanting to use, for example, IP information from one group of servers
+# without having to talk to them in the same playbook run to get their
+# current IP information.
+#fact_caching = memory
+
+
+# retry files
+# When a playbook fails by default a .retry file will be created in ~/
+# You can disable this feature by setting retry_files_enabled to False
+# and you can change the location of the files by setting retry_files_save_path
+
+#retry_files_enabled = False
+#retry_files_save_path = ~/.ansible-retry
+
+# squash actions
+# Ansible can optimise actions that call modules with list parameters
+# when looping. Instead of calling the module once per with_ item, the
+# module is called once with all items at once. Currently this only works
+# under limited circumstances, and only with parameters named 'name'.
+#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper
+
+# prevents logging of task data, off by default
+#no_log = False
+
+# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
+#no_target_syslog = False
+
+# controls whether Ansible will raise an error or warning if a task has no
+# choice but to create world readable temporary files to execute a module on
+# the remote machine. This option is False by default for security. Users may
+# turn this on to have behaviour more like Ansible prior to 2.1.x. See
+# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
+# for more secure ways to fix this than enabling this option.
+#allow_world_readable_tmpfiles = False
+
+# controls the compression level of variables sent to
+# worker processes. At the default of 0, no compression
+# is used. This value must be an integer from 0 to 9.
+#var_compression_level = 9
+
+# controls what compression method is used for new-style ansible modules when
+# they are sent to the remote system. The compression types depend on having
+# support compiled into both the controller's python and the client's python.
+# The names should match with the python Zipfile compression types:
+# * ZIP_STORED (no compression. available everywhere)
+# * ZIP_DEFLATED (uses zlib, the default)
+# These values may be set per host via the ansible_module_compression inventory
+# variable
+#module_compression = 'ZIP_DEFLATED'
+
+# This controls the cutoff point (in bytes) on --diff for files
+# set to 0 for unlimited (RAM may suffer!).
+#max_diff_size = 1048576
+
+# This controls how ansible handles multiple --tags and --skip-tags arguments
+# on the CLI. If this is True then multiple arguments are merged together. If
+# it is False, then the last specified argument is used and the others are ignored.
+# This option will be removed in 2.8.
+#merge_multiple_cli_flags = True
+
+# Controls showing custom stats at the end, off by default
+#show_custom_stats = True
+
+# Controls which files to ignore when using a directory as inventory with
+# possibly multiple sources (both static and dynamic)
+#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
+
+# This family of modules use an alternative execution path optimized for network appliances
+# only update this setting if you know how this works, otherwise it can break module execution
+#network_group_modules=eos, nxos, ios, iosxr, junos, vyos
+
+# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
+# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
+# jinja2 templating language which will be run through the templating engine.
+# ENABLING THIS COULD BE A SECURITY RISK
+#allow_unsafe_lookups = False
+
+# set default errors for all plays
+#any_errors_fatal = False
+
+[inventory]
+# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini'
+#enable_plugins = host_list, virtualbox, yaml, constructed
+
+# ignore these extensions when parsing a directory as inventory source
+#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
+
+# ignore files matching these patterns when parsing a directory as inventory source
+#ignore_patterns=
+
+# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
+#unparsed_is_failed=False
+
+[privilege_escalation]
+#become=True
+#become_method=sudo
+#become_user=root
+#become_ask_pass=False
+
+[paramiko_connection]
+
+# uncomment this line to cause the paramiko connection plugin to not record new host
+# keys encountered. Increases performance on new host additions. Setting works independently of the
+# host key checking setting above.
+#record_host_keys=False
+
+# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
+# line to disable this behaviour.
+#pty=False
+
+# paramiko will default to looking for SSH keys initially when trying to
+# authenticate to remote devices. This is a problem for some network devices
+# that close the connection after a key failure. Uncomment this line to
+# disable the Paramiko look for keys function
+#look_for_keys = False
+
+# When using persistent connections with Paramiko, the connection runs in a
+# background process. If the host doesn't already have a valid SSH key, by
+# default Ansible will prompt to add the host key. This will cause connections
+# running in background processes to fail. Uncomment this line to have
+# Paramiko automatically add host keys.
+#host_key_auto_add = True
+
+[ssh_connection]
+
+# ssh arguments to use
+# Leaving off ControlPersist will result in poor performance, so use
+# paramiko on older platforms rather than removing it, -C controls compression use
+#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
+
+# The base directory for the ControlPath sockets.
+# This is the "%(directory)s" in the control_path option
+#
+# Example:
+# control_path_dir = /tmp/.ansible/cp
+#control_path_dir = ~/.ansible/cp
+
+# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
+# port and username (empty string in the config). The hash mitigates a common problem users
+# found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
+# In those cases, a "too long for Unix domain socket" ssh error would occur.
+#
+# Example:
+# control_path = %(directory)s/%%h-%%r
+#control_path =
+
+# Enabling pipelining reduces the number of SSH operations required to
+# execute a module on the remote server. This can result in a significant
+# performance improvement when enabled, however when using "sudo:" you must
+# first disable 'requiretty' in /etc/sudoers
+#
+# By default, this option is disabled to preserve compatibility with
+# sudoers configurations that have requiretty (the default on many distros).
+#
+#pipelining = False
+
+# Control the mechanism for transferring files (old)
+# * smart = try sftp and then try scp [default]
+# * True = use scp only
+# * False = use sftp only
+#scp_if_ssh = smart
+
+# Control the mechanism for transferring files (new)
+# If set, this will override the scp_if_ssh option
+# * sftp = use sftp to transfer files
+# * scp = use scp to transfer files
+# * piped = use 'dd' over SSH to transfer files
+# * smart = try sftp, scp, and piped, in that order [default]
+#transfer_method = smart
+
+# if False, sftp will not use batch mode to transfer files. This may cause some
+# types of file transfer failures impossible to catch however, and should
+# only be disabled if your sftp version has problems with batch mode
+#sftp_batch_mode = False
+
+# The -tt argument is passed to ssh when pipelining is not enabled because sudo
+# requires a tty by default.
+#use_tty = True
+
+[persistent_connection]
+
+# Configures the persistent connection timeout value in seconds. This value is
+# how long the persistent connection will remain idle before it is destroyed.
+# If the connection doesn't receive a request before the timeout value
+# expires, the connection is shutdown. The default value is 30 seconds.
+#connect_timeout = 30
+
+# Configures the persistent connection retry timeout. This value configures the
+# retry timeout that ansible-connection will wait to connect
+# to the local domain socket. This value must be larger than the
+# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
+# The default value is 15 seconds.
+#connect_retry_timeout = 15
+
+# The command timeout value defines the amount of time to wait for a command
+# or RPC call before timing out. The value for the command timeout must
+# be less than the value of the persistent connection idle timeout (connect_timeout).
+# The default value is 10 seconds.
+#command_timeout = 10
+
+[accelerate]
+#accelerate_port = 5099
+#accelerate_timeout = 30
+#accelerate_connect_timeout = 5.0
+
+# The daemon timeout is measured in minutes. This time is measured
+# from the last activity to the accelerate daemon.
+#accelerate_daemon_timeout = 30
+
+# If set to yes, accelerate_multi_key will allow multiple
+# private keys to be uploaded to it, though each user must
+# have access to the system via SSH to add a new key. The default
+# is "no".
+#accelerate_multi_key = yes
+
+[selinux]
+# file systems that require special treatment when dealing with security context
+# the default behaviour that copies the existing context or uses the user default
+# needs to be changed to use the file system dependent context.
+#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p
+
+# Set this to yes to allow libvirt_lxc connections to work without SELinux.
+#libvirt_lxc_noseclabel = yes
+
+[colors]
+#highlight = white
+#verbose = blue
+#warn = bright purple
+#error = red
+#debug = dark gray
+#deprecate = purple
+#skip = cyan
+#unreachable = red
+#ok = green
+#changed = yellow
+#diff_add = green
+#diff_remove = red
+#diff_lines = cyan
+
+
+[diff]
+# Always print diff when running ( same as always running with -D/--diff )
+# always = no
+
+# Set how many context lines to show in diff
+# context = 3
+
+[galaxy]
+server_list = automation_hub
+
+[galaxy_server.automation_hub]
+url=https://cloud.redhat.com/api/automation-hub/
+auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token/
+
+token=eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJhZDUyMjdhMy1iY2ZkLTRjZjAtYTdiNi0zOTk4MzVhMDg1NjYifQ.eyJpYXQiOjE2NzkzMDkyMTcsImp0aSI6IjJmZTdjZjA1LTAxZDQtNDMwMi1iMWNlLTgzNjlhNWJmNjViMyIsImlzcyI6Imh0dHBzOi8vc3NvLnJlZGhhdC5jb20vYXV0aC9yZWFsbXMvcmVkaGF0LWV4dGVybmFsIiwiYXVkIjoiaHR0cHM6Ly9zc28ucmVkaGF0LmNvbS9hdXRoL3JlYWxtcy9yZWRoYXQtZXh0ZXJuYWwiLCJzdWIiOiJmOjUyOGQ3NmZmLWY3MDgtNDNlZC04Y2Q1LWZlMTZmNGZlMGNlNjpqZW5uaWZlcl9qb2huIiwidHlwIjoiT2ZmbGluZSIsImF6cCI6ImNsb3VkLXNlcnZpY2VzIiwibm9uY2UiOiJmZTY2MGYxMS1kODFjLTQ2YWItYTkzNS1hZTAxZmY2MjA2OTciLCJzZXNzaW9uX3N0YXRlIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIiwic2NvcGUiOiJvcGVuaWQgYXBpLmlhbS5zZXJ2aWNlX2FjY291bnRzIGFwaS5pYW0ub3JnYW5pemF0aW9uIG9mZmxpbmVfYWNjZXNzIiwic2lkIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIn0.iGbseoF6AXetWNa0sFsfzbmzvizwaBcY0rd14YFJqcU \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..40d737b5f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml
@@ -0,0 +1,102 @@
+objects:
+ role: {}
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ cifsserver:
+ description: Manage CIFS server on Unity storage system
+ name: cifsserver
+ namespace: ''
+ version_added: 1.4.0
+ consistencygroup:
+ description: Manage consistency groups on Unity storage system
+ name: consistencygroup
+ namespace: ''
+ version_added: 1.1.0
+ filesystem:
+ description: Manage filesystem on Unity storage system
+ name: filesystem
+ namespace: ''
+ version_added: 1.1.0
+ filesystem_snapshot:
+ description: Manage filesystem snapshot on the Unity storage system
+ name: filesystem_snapshot
+ namespace: ''
+ version_added: 1.1.0
+ host:
+ description: Manage Host operations on Unity
+ name: host
+ namespace: ''
+ version_added: 1.1.0
+ info:
+ description: Gathering information about Unity
+ name: info
+ namespace: ''
+ version_added: 1.1.0
+ interface:
+ description: Manage Interfaces on Unity storage system
+ name: interface
+ namespace: ''
+ version_added: 1.4.0
+ nasserver:
+ description: Manage NAS servers on Unity storage system
+ name: nasserver
+ namespace: ''
+ version_added: 1.1.0
+ nfs:
+ description: Manage NFS export on Unity storage system
+ name: nfs
+ namespace: ''
+ version_added: 1.1.0
+ nfsserver:
+ description: Manage NFS server on Unity storage system
+ name: nfsserver
+ namespace: ''
+ version_added: 1.4.0
+ smbshare:
+ description: Manage SMB shares on Unity storage system
+ name: smbshare
+ namespace: ''
+ version_added: 1.1.0
+ snapshot:
+ description: Manage snapshots on the Unity storage system
+ name: snapshot
+ namespace: ''
+ version_added: 1.1.0
+ snapshotschedule:
+ description: Manage snapshot schedules on Unity storage system
+ name: snapshotschedule
+ namespace: ''
+ version_added: 1.1.0
+ storagepool:
+ description: Manage storage pool on Unity
+ name: storagepool
+ namespace: ''
+ version_added: 1.1.0
+ tree_quota:
+ description: Manage quota tree on the Unity storage system
+ name: tree_quota
+ namespace: ''
+ version_added: 1.2.0
+ user_quota:
+ description: Manage user quota on the Unity storage system
+ name: user_quota
+ namespace: ''
+ version_added: 1.2.0
+ volume:
+ description: Manage volume on Unity storage system
+ name: volume
+ namespace: ''
+ version_added: 1.1.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 1.6.0
diff --git a/ansible_collections/dellemc/unity/changelogs/changelog.yaml b/ansible_collections/dellemc/unity/changelogs/changelog.yaml
new file mode 100644
index 000000000..ee13691d9
--- /dev/null
+++ b/ansible_collections/dellemc/unity/changelogs/changelog.yaml
@@ -0,0 +1,159 @@
+ancestor: null
+releases:
+ 1.0.0:
+ changes:
+ major_changes:
+ - Added CRUD operations support for Consistency group.
+ - Added CRUD operations support for Volume.
+ - Added CRUD operations support for a snapshot schedule.
+ - Added support for CRUD operations on a host with FC/iSCSI initiators.
+ - Added support for CRUD operations on a snapshot of a volume.
+ - Added support for adding/removing volumes to/from a consistency group.
+ - Added support to add/remove FC/iSCSI initiators to/from a host.
+ - Added support to create a snapshot for a consistency group.
+ - Added support to get/modify operations on storage pool.
+ - Added support to map/unmap a host to/from a snapshot.
+ - Gather facts module is enhanced to list volumes, consistency groups, FC initiators,
+ iSCSI initiators, hosts, snapshot schedules.
+ modules:
+ - description: Manage consistency groups on Unity storage system
+ name: consistencygroup
+ namespace: ''
+ - description: Manage Host operations on Unity
+ name: host
+ namespace: ''
+ - description: Gathering information about Unity
+ name: info
+ namespace: ''
+ - description: Manage snapshots on the Unity storage system
+ name: snapshot
+ namespace: ''
+ - description: Manage snapshot schedules on Unity storage system
+ name: snapshotschedule
+ namespace: ''
+ - description: Manage storage pool on Unity
+ name: storagepool
+ namespace: ''
+ - description: Manage volume on Unity storage system
+ name: volume
+ namespace: ''
+ release_date: '2020-06-20'
+ 1.1.0:
+ changes:
+ minor_changes:
+ - Added CRUD operations support for Filesystem snapshot.
+ - Added CRUD operations support for Filesystem.
+ - Added CRUD operations support for NFS export.
+ - Added CRUD operations support for SMB share.
+ - Added support to get/modify operations on NAS server.
+ - Gather facts module is enhanced to list Filesystem snapshots, NAS servers,
+ File systems, NFS exports, SMB shares.
+ modules:
+ - description: Manage filesystem on Unity storage system
+ name: filesystem
+ namespace: ''
+ - description: Manage filesystem snapshot on the Unity storage system
+ name: filesystem_snapshot
+ namespace: ''
+ - description: Manage NAS servers on Unity storage system
+ name: nasserver
+ namespace: ''
+ - description: Manage NFS export on Unity storage system
+ name: nfs
+ namespace: ''
+ - description: Manage SMB shares on Unity storage system
+ name: smbshare
+ namespace: ''
+ release_date: '2020-12-02'
+ 1.2.0:
+ changes:
+ minor_changes:
+ - Added CRUD operations support for Quota tree.
+ - Added CRUD operations support for User Quota on Filesystem/Quota tree.
+ - Added support for Application tagging.
+ - Consistency group module is enhanced to map/unmap hosts to/from a new or existing
+ consistency group.
+ - Filesystem module is enhanced to associate/dissociate snapshot schedule to/from
+ a Filesystem.
+ - Filesystem module is enhanced to update default quota configuration during
+ create operation.
+ - Gather facts module is enhanced to list User Quota and Quota tree components.
+ - Volume module is enhanced to support map/unmap multiple hosts from a volume.
+ modules:
+ - description: Manage quota tree on the Unity storage system
+ name: tree_quota
+ namespace: ''
+ - description: Manage user quota on the Unity storage system
+ name: user_quota
+ namespace: ''
+ release_date: '2021-06-25'
+ 1.2.1:
+ changes:
+ minor_changes:
+ - Added dual licensing
+ - Documentation updates
+ - Fixed typo in galaxy.yml
+ - Updated few samples in modules
+ release_date: '2021-09-28'
+ 1.3.0:
+ changes:
+ minor_changes:
+ - Added rotating file handler for logging.
+ - Bugfix in volume module to retrieve details of non-thin volumes.
+ - Enhance host module to support add/remove network address to/from a host.
+ - Enhanced Info module to list disk groups.
+ - Enhanced Storage Pool module to support listing of drive details of a pool
+ - Enhanced Storage pool module to support creation of storage pool
+ - Enhanced consistency group module to support enable/disable replication in
+ consistency group
+ - Enhanced host module to support both mapping and un-mapping of non-logged-in
+ initiators to host.
+ - Enhanced host module to support listing of network addresses, FC initiators,
+ ISCSI initiators and allocated volumes of a host
+ - Removed dellemc.unity prefix from module names.
+ - Renamed gatherfacts module to info module
+ release_date: '2022-03-25'
+ 1.4.0:
+ changes:
+ minor_changes:
+ - Added cifsserver module to support create, list and delete CIFS server.
+ - Added execution environment manifest file to support building an execution
+ environment with ansible-builder.
+ - Added interface module to support create, list and delete interface.
+ - Added nfsserver module to support create, list and delete NFS server.
+ - Check mode is supported for Info.
+ - Enhance nfs module to support advanced host management option.
+ - Enhanced filesystem module to support create, modify and delete of filesystem
+ replication.
+ - Enhanced info module to list cifs server, nfs servers, ethernet port and file
+ interface.
+ - Enhanced nas server module to support create, modify and delete of nas server
+ replication.
+ modules:
+ - description: Manage CIFS server on Unity storage system
+ name: cifsserver
+ namespace: ''
+ - description: Manage Interfaces on Unity storage system
+ name: interface
+ namespace: ''
+ - description: Manage NFS server on Unity storage system
+ name: nfsserver
+ namespace: ''
+ release_date: '2022-06-28'
+ 1.4.1:
+ changes:
+ minor_changes:
+ - Updated the execution environment related files.
+ release_date: '2022-09-27'
+ 1.5.0:
+ changes:
+ minor_changes:
+ - Updated modules to adhere with ansible community guidelines.
+ release_date: '2022-12-20'
+ 1.6.0:
+ changes:
+ minor_changes:
+ - Support addition of host from the Host List to NFS Export in nfs module.
+ - Support enable/disable advanced dedup in volume module.
+ - Add synchronous replication support for filesystem.
+ release_date: '2023-03-31'
diff --git a/ansible_collections/dellemc/unity/changelogs/config.yaml b/ansible_collections/dellemc/unity/changelogs/config.yaml
new file mode 100644
index 000000000..b4bf6e160
--- /dev/null
+++ b/ansible_collections/dellemc/unity/changelogs/config.yaml
@@ -0,0 +1,33 @@
+---
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Dellemc.Unity
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/dellemc/unity/docs/ADOPTERS.md b/ansible_collections/dellemc/unity/docs/ADOPTERS.md
new file mode 100644
index 000000000..826b5cd78
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/ADOPTERS.md
@@ -0,0 +1,11 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# List of adopters
diff --git a/ansible_collections/dellemc/unity/docs/BRANCHING.md b/ansible_collections/dellemc/unity/docs/BRANCHING.md
new file mode 100644
index 000000000..810a309bb
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/BRANCHING.md
@@ -0,0 +1,32 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Branching strategy
+
+Ansible modules for Dell Unity follows a scaled trunk branching strategy where short-lived branches are created off of the main branch. When coding is complete, the branch is merged back into main after being approved in a pull request code review.
+
+## Branch naming convention
+
+| Branch Type | Example | Comment |
+|--------------|-----------------------------------|-------------------------------------------|
+| main | main | |
+| Release      | release-1.0                       | hotfix: release-1.1, patch: release-1.0.1  |
+| Feature | feature-9-vol-support | "9" referring to GitHub issue ID |
+| Bug Fix | bugfix-110-fix-duplicates-issue | "110" referring to GitHub issue ID |
+
+
+## Steps for working on a release branch
+
+1. Fork the repository.
+2. Create a branch off of the main branch. The branch name should follow [branch naming convention](#branch-naming-convention).
+3. Make your changes and commit them to your branch.
+4. If other code changes have merged into the upstream main branch, perform a rebase of those changes into your branch.
+5. Open a [pull request](https://github.com/dell/ansible-unity/pulls) between your branch and the upstream main branch.
+6. Once your pull request has merged, your branch can be deleted.
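+
+As a sketch, steps 1 through 5 might look like this on the command line (assuming a fork under `<your-github-id>`, an `upstream` remote pointing at dell/ansible-unity, and a hypothetical feature branch for issue 9):
+
+```console
+$ git clone git@github.com:<your-github-id>/ansible-unity.git
+$ cd ansible-unity
+$ git checkout -b feature-9-vol-support main
+# ...make and commit your changes...
+$ git fetch upstream && git rebase upstream/main   # pick up changes merged upstream
+$ git push origin feature-9-vol-support            # then open the pull request on GitHub
+```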
diff --git a/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md b/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..c791055c2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/CODE_OF_CONDUCT.md
@@ -0,0 +1,137 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Code of conduct - contributor covenant
+
+## Our pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at ansible.team@dell.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary ban
+
+**Community impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent ban
+
+**Community impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md
new file mode 100644
index 000000000..8af0752e8
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/COMMITTER_GUIDE.md
@@ -0,0 +1,49 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Committer guidelines
+
+These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and are not necessarily employees of Dell.
+
+These guidelines apply to everyone; as Committers, you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse them.
+
+If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to stop committing or lose these privileges altogether.
+
+## General rules
+
+### Don't
+
+* Break the build.
+* Commit directly.
+* Compromise backward compatibility.
+* Disrespect your Community Team members. Help them grow.
+* Think it is someone else's job to test your code. Write tests for all the code you produce.
+* Forget to keep things simple.
+* Create technical debt. Fix-in-place and make it the highest priority above everything else.
+
+### Do
+
+* Keep it simple.
+* Good work, your best every time.
+* Keep the design of your software clean and maintainable.
+* Squash your commits, avoid merges.
+* Be active. Committers that are not active may have their permissions suspended.
+* Write tests for all your deliverables.
+* Automate everything.
+* Maintain a high code coverage.
+* Keep an open communication with other Committers.
+* Ask questions.
+* Document your contributions and remember to keep it simple.
+
+## People
+
+| Name | GitHub ID | Nickname |
+|-------|-------------|------------|
+| | | |
diff --git a/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md
new file mode 100644
index 000000000..1cf25a511
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md
@@ -0,0 +1,173 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# How to contribute
+
+Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md).
+
+## Table of contents
+
+* [Become a contributor](#Become-a-contributor)
+* [Submitting issues](#Submitting-issues)
+* [Triage issues](#Triage-issues)
+* [Your first contribution](#Your-first-contribution)
+* [Branching](#Branching)
+* [Signing your commits](#Signing-your-commits)
+* [Pull requests](#Pull-requests)
+* [Code reviews](#Code-reviews)
+* [TODOs in the code](#TODOs-in-the-code)
+
+## Become a contributor
+
+You can contribute to this project in several ways. Here are some examples:
+
+* Contribute to the Ansible modules for Dell Unity documentation and codebase.
+* Report and triage bugs.
+* Feature requests.
+* Write technical documentation and blog posts, for users and contributors.
+* Help others by answering questions about this project.
+
+## Submitting issues
+
+All issues related to Ansible modules for Dell Unity, regardless of the service/repository the issue belongs to, should be submitted [here](https://github.com/dell/ansible-unity/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted.
+
+### Report bugs
+
+We aim to track and document everything related to Ansible modules for Dell Unity via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community-driven process.
+
+Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/ansible-unity/issues) for similar issues.
+
+Report a bug by submitting a [bug report](https://github.com/dell/ansible-unity/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug.
+
+When opening a Bug please include this information to help with debugging:
+
+1. Version of relevant software: this software, Ansible, Python, SDK, etc.
+2. Details of the issue explaining the problem: what, when, where
+3. The expected outcome that was not met (if any)
+4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__
+
+An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue.
+
+### Feature request
+
+If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/ansible-unity/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A).
+
+### Answering questions
+
+If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/ansible-unity/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A)
+
+We'd love your help answering questions being asked by other Ansible modules for Dell Unity users.
+
+## Triage issues
+
+Triage helps ensure that issues resolve quickly by:
+
+* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+* Giving a contributor the information they need before they commit to resolving an issue.
+* Lowering the issue count by preventing duplicate issues.
+* Streamlining the development process by preventing duplicate discussions.
+
+If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell Unity community will thank you for saving them time by spending some of yours.
+
+Read more about the ways you can [Triage issues](https://github.com/dell/ansible-unity/blob/1.6.0/docs/ISSUE_TRIAGE.md).
+
+## Your first contribution
+
+Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`.
+
+* [Beginner-friendly](https://github.com/dell/ansible-unity/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete.
+* [Help wanted](https://github.com/dell/ansible-unity/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity.
+
+When you're ready to contribute, it's time to create a pull request.
+
+## Branching
+
+* [Branching Strategy for Ansible modules for Dell Unity](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md)
+
+## Signing your commits
+
+We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions.
+
+GitHub will prevent a pull request from being merged if there are any unsigned commits.
+
+### Signing a commit
+
+GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key.
+
+Make sure you have your user name and e-mail address set. This is required for your signed commit to be properly verified. Check these references:
+
+* Setting up your GitHub username [reference](https://help.github.com/articles/setting-your-username-in-git/)
+* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/)
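+
+For example, a minimal sketch of that configuration (the name, e-mail address, and key ID below are placeholders):
+
+```console
+$ git config --global user.name "Your Name"
+$ git config --global user.email "your.name@example.com"
+# Point git at the GPG key to sign with; list key IDs with: gpg --list-secret-keys
+$ git config --global user.signingkey <your-key-id>
+```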
+
+Once Git and your GitHub account have been properly configured, you can add the -S flag to the git commits:
+
+```console
+$ git commit -S -m "your commit message"
+# Creates a signed commit
+```
+
+### Commit message format
+
+Ansible modules for Dell Unity uses the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/).
+
+## Pull requests
+
+If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request).
+
+A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it.
+
+To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines:
+
+* Title and description matches the implementation.
+* Commits within the pull request follow the formatting guidelines.
+* The pull request closes one related issue.
+* The pull request contains necessary tests that verify the intended behavior.
+* If your pull request has conflicts, rebase your branch onto the main branch.
+
+If the pull request fixes a bug:
+
+* The pull request description must include `Fixes #<issue number>`.
+* To avoid regressions, the pull request should include tests that replicate the fixed bug.
+
+The team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become part of the Git commit body.
+
+We use the pull request title when we generate change logs for releases. As such, we strive to make the title as informative as possible.
+
+Make sure that the title for your pull request uses the same format as the subject line in the commit message.
+
+### Quality gates for pull requests
+
+GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirements for any code that gets checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SUPPORT.md).
+
+#### Code sanitization
+
+[GitHub action](https://github.com/dell/ansible-unity/actions/workflows/ansible-test.yml) that analyzes source code to flag ansible sanity errors and runs Unit tests.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests.
+
+A pull request must satisfy the following for it to be merged:
+
+* A pull request will require at least 2 maintainer approvals.
+* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document.
+* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again.
+
+## Code style
+
+Ensure the added code has the required documentation, examples and unit tests.
+
+### Sanity
+
+Run `ansible-test sanity --docker default` on your code to ensure sanity. Ensure the code does not introduce any sanity violations and does not break any existing unit test workflows.
+
+### TODOs in the code
+
+We don't like TODOs in the code or documentation. It is really best if you sort out all issues you can see with the changes before we check the changes in.
diff --git a/ansible_collections/dellemc/unity/docs/INSTALLATION.md b/ansible_collections/dellemc/unity/docs/INSTALLATION.md
new file mode 100644
index 000000000..01f2856b0
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/INSTALLATION.md
@@ -0,0 +1,100 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Installation and execution of Ansible modules for Dell Unity
+
+## Installation of SDK
+* Install the Python SDK named [Storops](https://pypi.org/project/storops/). It can be installed using pip for the appropriate Python version. Execute this command:
+
+ pip install storops
+
+* Alternatively, other installation methods can be found on the [SDK](https://github.com/emc-openstack/storops#readme) page.
+
+## Building collections
+ * Use this command to build the collection from source code:
+
+ ansible-galaxy collection build
+
+ For more details on how to build a tarball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball)
+
+## Installing collections
+
+#### Online installation of collections
+ * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/unity):
+
+ ansible-galaxy collection install dellemc.unity -p <install_path>
+
+#### Offline installation of collections
+
+ * Download the latest tar build from any of the available distribution channels, [Ansible Galaxy](https://galaxy.ansible.com/dellemc/unity) or [Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/unity), and use this command to install the collection anywhere in your system:
+
+ ansible-galaxy collection install dellemc-unity-1.6.0.tar.gz -p <install_path>
+
+ * Set the environment variable:
+
+ export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS:<install_path>
+
+## Using collections
+
+ * In order to use any Ansible module, ensure that the proper FQCN (Fully Qualified Collection Name) is referenced in the playbook. Refer to this example:
+
+ collections:
+ - dellemc.unity
+
+ * In order to use the installed collection in a specific task, use the proper FQCN (Fully Qualified Collection Name). Refer to this example:
+
+ tasks:
+ - name: Create volume
+ dellemc.unity.volume
+
+ * To generate Ansible documentation for a specific module, prefix the module name with the FQCN. Refer to this example:
+
+ ansible-doc dellemc.unity.volume
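+
+ * Putting these pieces together, a minimal playbook sketch might look like the following. The Unisphere host, credentials, and volume parameters are placeholders for illustration only; refer to each module's documentation for the authoritative parameter list:
+
+ - name: Volume operations on Unity
+   hosts: localhost
+   connection: local
+   tasks:
+     - name: Create volume
+       dellemc.unity.volume:
+         unispherehost: "unity.example.com"
+         username: "user"
+         password: "password"
+         validate_certs: false
+         vol_name: "ansible_vol"
+         pool_name: "pool_1"
+         size: 2
+         cap_unit: "GB"
+         state: "present"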
+
+
+## Ansible modules execution
+
+The Ansible server must be configured with the Python library for Unity to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-unity/blob/1.6.0/docs/) provide information on the different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on the various parameters which need to be configured before running the modules.
+
+## SSL certificate validation
+
+* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means.
+* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command:
+
+ export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<<Certificate_Name>>
+* Import the SSL certificate to host using the command:
+
+ update-ca-trust extract
+* If "TLS CA certificate bundle error" occurs, then follow these steps:
+
+ cd /etc/pki/tls/certs/
+ openssl x509 -in ca-bundle.crt -text -noout
+
+## Results
+Each module returns the updated state and details of the entity. For example, if you are using the Volume module, all calls will return the updated details of the volume. A sample result is shown in each module's documentation.
+
+## Ansible execution environment
+Ansible can also be installed in a container environment. Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments.
+* Install the ansible-builder package using:
+
+ pip3 install ansible-builder
+* Ensure that execution-environment.yml is at the root of the collection (a minimal sample of the format is shown at the end of this section) and create the execution environment using:
+
+ ansible-builder build --tag <tag_name> --container-runtime docker
+* After the image is built, run the container using:
+
+ docker run -it <tag_name> /bin/bash
+* Verify the collection installation using the command:
+
+ ansible-galaxy collection list
+* The playbook can be run on the container using:
+
+ docker run --rm -v $(pwd):/runner <tag_name> ansible-playbook info_test.yml
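+
+As a reference, a minimal execution-environment.yml for ansible-builder (version 1 format) might look like this; the base image and dependency file names below are conventional defaults, not taken from this collection:
+
+ ---
+ version: 1
+ build_arg_defaults:
+   EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest'
+ dependencies:
+   galaxy: requirements.yml
+   python: requirements.txt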
diff --git a/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md
new file mode 100644
index 000000000..d3e443494
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md
@@ -0,0 +1,308 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Triage issues
+
+The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it.
+
+> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic.
+
+The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project.
+
+Triage helps ensure issues resolve quickly by:
+
+- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+- Giving a contributor the information they need before they commit to resolving an issue.
+- Lowering the issue count by preventing duplicate issues.
+- Streamlining the development process by preventing duplicate discussions.
+
+If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.
+
+## 1. Find issues that need triage
+
+The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label.
+
+## 2. Ensure the issue contains basic information
+
+Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors in providing the standard information that must be included for each type of issue.
+
+### Standard issue information that must be included
+
+This section describes the various issue templates and the expected content.
+
+#### Bug reports
+
+Should explain what happened, what was expected, and how to reproduce the problem, together with any additional information that may help give a complete picture of what happened, such as screenshots, output, and any environment-related information that is applicable and/or may be related to the reported problem:
+
+ - Ansible Version: [e.g. 2.14]
+ - Python Version: [e.g. 3.10]
+ - Ansible modules for Dell Unity Version: [e.g. 1.6.0]
+ - Unity SDK version: [e.g. Unity 1.2.11]
+ - Any other additional information...
+
+#### Feature requests
+
+Should explain what feature the author wants added and why it is needed.
+
+#### Ask a question requests
+
+In general, if the issue description and title are perceived as a question, no more information is needed.
+
+### Good practices
+
+To make it easier for everyone to understand and find the issues they're searching for, the following general rules of thumb are suggested:
+
+- Make sure that issue titles explain the subject of the issue, are spelled correctly, and don't include irrelevant and/or sensitive information.
+- Make sure that issue descriptions don't include irrelevant information.
+- Make sure that issues do not contain sensitive information.
+- Make sure that issues have all relevant fields filled in.
+- Do your best effort to change title and description or request suggested changes by adding a comment.
+
+> **Note:** The above rules apply to both new and existing issues.
+
+### Dealing with missing information
+
+Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, kindly explain to the author that they must provide the above information to clarify the problem. Label the issue with `triage/needs-information`.
+
+If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label the issue with `triage/needs-information`.
+
+If the author does not respond to the requested information within a week, close the issue with a kind note stating that the author can request that the issue be reopened when the necessary information is provided.
+
+If you receive a notification that additional information was provided but you are no longer on issue triage and do not have time to handle it, delegate it to the person currently on issue triage.
+
+## 3. Categorizing an issue
+
+### Duplicate issues
+
+Make sure the issue is not a duplicate by searching existing issues using related terms from the issue title and description. If you suspect there is an existing issue but can't find it, reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue:
+
+1. Add a comment `duplicate of #<issue number>`
+2. Add the `triage/duplicate` label
+
+### Bug reports
+
+If it's not perfectly clear that it's an actual bug, quickly try to reproduce it.
+
+**It's a bug/it can be reproduced:**
+
+1. Add a comment describing detailed steps for how to reproduce it, if applicable.
+2. If you know that maintainers won't be able to put any resources into it for some time, then label the issue with `help wanted` and optionally `beginner friendly`, together with pointers on which code to update to fix the bug. This signals to the community that we would appreciate any help we can get to resolve the issue.
+3. Move on to [prioritizing the issue](#4-prioritization-of-issues).
+
+**It can't be reproduced:**
+
+Do one of the following:
+
+1. [Ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate the issue more thoroughly. Provide details in a comment.
+2. [Delegate further investigation](#investigation-of-issues) to someone else. Provide details in a comment.
+
+**It works as intended/by design:**
+
+1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue.
+2. Label the issue `triage/works-as-intended`.
+3. Remove the `needs-triage` label.
+
+**It does not work as intended/by design:**
+
+1. Consider the report a valid bug and move on to [prioritizing the issue](#4-prioritization-of-issues).
+
+### Feature requests
+
+1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label, and close the issue.
+2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label.
+
+## 4. Prioritization of issues
+
+In general, bug and feature request issues should be labeled with a priority.
+
+Adding priority levels can be difficult. Ensure that you have the knowledge, context, and experience before prioritizing any issues. If you have any uncertainty as to which priority level to assign, please ask the maintainers for help.
+
+The key here is to ask for help and discuss issues to understand how more experienced project members think and reason. By doing that, you learn more and eventually become more and more comfortable with prioritizing issues.
+
+| Label | Description |
+| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority immediately. |
+| `priority/high` | Must be worked on soon, ideally in time for the next release. |
+| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. |
+
+### Critical priority
+
+1. If an issue has been categorized and any of these criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority immediately.
+
+ - Results in any data loss
+ - Critical security or performance issues
+ - Problem that makes a feature unusable
+ - Multiple users experience a severe problem affecting their business, users etc.
+
+2. Label the issue `priority/critical`.
+3. Escalate the problem to the maintainers.
+4. Assign or ask a maintainer for help assigning someone to make this issue their top priority immediately.
+5. Add the issue to the next upcoming release milestone.
+
+### High priority
+
+1. Label the issue `priority/high`.
+2. Add the issue to the next upcoming release milestone.
+3. Prioritize it or assign someone to work on it now or very soon.
+4. Consider requesting [help from the community](#5-requesting-help-from-the-community).
+
+### Low priority
+
+1. If the issue is deemed possibly useful but a low priority, label the issue `priority/low`.
+2. The amount of interest in the issue will determine whether the priority is elevated.
+3. Consider requesting [help from the community](#5-requesting-help-from-the-community).
+
+## 5. Requesting help from the community
+
+Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that its help is appreciated and needed when an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged.
+
+In many cases, the issue author or the community as a whole is better suited to contribute changes, since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and has reached out to the community to get the missing information.
+
+1. Kindly and politely add a comment to alert update subscribers.
+    - Explain the issue and the need for resolution. Be sure to note that the issue has not been prioritized and has not been scheduled for work by the maintainers.
+ - If possible or applicable, add pointers and references to the code/files that need to be revised. Provide any ideas as to the solution. This will help the maintainers get started on resolving the issue.
+2. Label the issue with `help wanted`.
+3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on.
+
+## Investigation of issues
+
+When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. Depending on the perceived severity and/or number of [upvotes](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments), the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it.
+
+Even if you don't have the time or knowledge to investigate an issue, we highly recommend that you [upvote](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments) the issue if you happen to have the same problem. If you have further details that may help investigate the issue, please provide as much information as possible.
+
+## External pull requests
+
+Part of issue triage should also be the triaging of external PRs. The main goal is to make sure that PRs from external contributors have an owner/reviewer and are not forgotten.
+
+1. Check new external PRs which do not have a reviewer.
+1. Check if there is a link to an existing issue.
+1. If there is none and you know which issue the PR solves, add the link yourself; otherwise ask the author to link the issue or create one.
+1. Assign a reviewer based on who was handling the linked issue or which code or feature the PR touches (if all else fails, look at who last made changes there).
+
+## GitHub issue management workflow
+
+This section describes the triage workflow for new GitHub issues that get created.
+
+### GitHub Issue: Bug
+
+This workflow starts off with a GitHub issue of type bug being created.
+
+1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template
+2. By default a bug will be created with the `type/bug` and `needs-triage` labels
+
+The following flow chart outlines the triage process for bugs.
+
+<!-- https://textik.com/#38ec14781648871c -->
+```
+ +--------------------------+
+ | New bug issue opened/more|
+ | information added |
+ +-------------|------------+
+ |
+ |
+ +----------------------------------+ NO +--------------|-------------+
+ | label: triage/needs-information --------- All required information |
+ | | | contained in issue? |
+ +-----------------------------|----+ +--------------|-------------+
+ | | YES
+ | |
+ +--------------------------+ | +---------------------+ YES +---------------------------------------+
+ |label:                    |      |       | Duplicate Issue?     ------- Comment `Duplicate of #<issue number>`
+ |triage/needs-investigation| | NO | | | Remove needs-triage label |
+ +------|-------------------+ | +----------|----------+ | label: triage/duplicate |
+ | | | NO +-----------------|---------------------+
+ YES | | | |
+ | +---------------|----+ NO +------------|------------+ |
+ | |Needs investigation?|---------- Can it be reproduced? | |
+ |------- | +------------|------------+ |
+ +--------------------+ | YES |
+ | +----------|----------+
+ +-------------------------+ +------------|------------+ | Close Issue |
+ | Add release-found label |------------------ Works as intended? | | |
+ | label: release-found/* | NO | | +----------|----------+
+ +------------|------------+ +------------|------------+ |
+ | | |
+ | | YES |
+ +-----------------------------+ +----------------|----------------+ |
+ | Add area label | | Add comment | |
+ | label: area/* | | Remove needs-triage label ------------------|
+ +------------|----------------+ | label: triage/works-as-intended |
+ | +---------------------------------+
+ |
+ +------------|-------------+ +----------+
+ | Add priority label | | Done ----------------------------------------
+ | label: priority/* | +----|-----+ |
+ +------------|-------------+ |NO |
+ | | +------------------|------------------+
+ +------------|-------------+ +----|----------------+ YES | Add details to issue |
+ | ------------ Signal Community? ---------- label: help wanted |
+ |Remove needs-triage label | | | | label: beginner friendly (optional)|
+ +--------------------------+ +---------------------+ +-------------------------------------+
+
+```
+
+If the author does not respond to a request for more information within a week, close the issue with a kind note stating that the author can request that the issue be reopened when the necessary information is provided.
+
+### GitHub issue: feature request
+
+This workflow starts off with a GitHub issue of type feature request being created.
+
+1. Collaborator or maintainer creates a GitHub feature request using the appropriate GitHub issue template
+2. By default a feature request will be created with the `type/feature-request` and `needs-triage` labels
+
+This flow chart outlines the triage process for feature requests.
+
+<!-- https://textik.com/#81e81fc717f63429 -->
+```
+ +---------------------------------+
+ |New feature request issue opened/|
+ |more information added |
+ +----------------|----------------+
+ |
+ |
+ +---------------------------------+ NO +-------------|------------+
+ | label: triage/needs-information ---------- All required information |
+ | | | contained in issue? |
+ +---------------------------------+ +-------------|------------+
+ |
+ |
+ +---------------------------------------+ |
+ |Comment `Duplicate of #<issue number>` | YES +----------|----------+
+ |Remove needs-triage label ------- Duplicate issue? |
+ |label: triage/duplicate | | |
+ +-----|---------------------------------+ +-----------|---------+
+ | |NO
+ | +-------------------------+ NO +-----------------------------+
+ | |Add comment |-------- Does feature request align |
+ | |Remove needs-triage label| | with product vision? |
+ | +------|------------------+ +--------------|--------------+
+ | | | YES
+ | | +-----------------|----------------+
+ | | |Change feature-request to feature |
+ | | |Remove label: type/feature-request|
+ | | |Add label: type/feature |
+ | | +-----------------|----------------+
+ | | |
+ | | +--------------|--------------+
+ | | | Add area label |
+ | | | label: area/* |
+ | | +--------------|--------------+
+ | | |
+ +-|---------|---+ +--------+ +--------------|--------------+
+ | Close issue | | Done --------- Add priority label |
+ | | | | | label: priority/* |
+ +---------------+ +--------+ +-----------------------------+
+```
+
+If the author does not respond to a request for more information within a week, close the issue with a kind note stating that the author can request that the issue be reopened when the necessary information is provided.
+
+In some cases, you may receive a request you do not wish to accept. Perhaps the request doesn't align with the project scope or vision. It is important to tactfully handle contributions that don't meet the project's standards.
+
+1. Acknowledge the person behind the contribution and thank them for their interest and contribution.
+2. Explain why it doesn't fit the scope or vision of the project.
+3. Don't leave unwanted contributions open. Promptly close any contribution you do not wish to accept.
diff --git a/ansible_collections/dellemc/unity/docs/MAINTAINERS.md b/ansible_collections/dellemc/unity/docs/MAINTAINERS.md
new file mode 100644
index 000000000..4679f6d73
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/MAINTAINERS.md
@@ -0,0 +1,18 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Maintainers
+
+* Ananthu Kuttattu (kuttattz)
+* Bhavneet Sharma (Bhavneet-Sharma)
+* Jennifer John (Jennifer-John)
+* Meenakshi Dembi (meenakshidembi691)
+* Pavan Mudunuri (Pavan-Mudunuri)
+* Trisha Datta (trisha-dell)
diff --git a/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md
new file mode 100644
index 000000000..78d13dd1d
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md
@@ -0,0 +1,38 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Maintainer guidelines
+
+As a Maintainer of this project, you are responsible for keeping true to the project's vision while maintaining a high degree of quality. Being part of this group is a privilege that requires dedication and time to attend to the daily activities associated with maintaining this project.
+
+## Becoming a maintainer
+
+Most Maintainers started as Contributors who demonstrated their commitment to the success of the project. Contributors wishing to become Maintainers must demonstrate commitment to the success of the project by contributing code, reviewing others' work, and triaging issues on a regular basis for at least three months.
+
+Contributions alone don't make you a Maintainer. You need to earn the trust of the current Maintainers and other project Contributors and show that your decisions and actions are in the best interest of the project.
+
+Periodically, the existing Maintainers curate a list of Contributors who have shown regular activity on the project over the prior months. It is from this list that Maintainer candidates are selected.
+
+After a candidate is selected, the existing Maintainers discuss the candidate over the next 5 business days, provide feedback, and vote. At least 75% of the current Maintainers must vote in the affirmative for a candidate to be moved to the role of Maintainer.
+
+If a candidate is approved, a Maintainer contacts the candidate to invite them to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a Maintainer once the pull request is merged.
+
+## Maintainer policies
+
+* Lead by example
+* Follow the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-unity/blob/1.6.0/docs/COMMITTER_GUIDE.md) guides
+* Promote a friendly and collaborative environment within our community
+* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests
+* Criticize code, not people. Ideally, tell the contributor a better way to do what they need.
+* Clearly mark optional suggestions as such. As a best practice, start your comment with *At your option: …*
+
+## Project decision making
+
+All project decisions should contribute to successfully executing on the project roadmap. Project milestones are established for each release.
diff --git a/ansible_collections/dellemc/unity/docs/Release Notes.md b/ansible_collections/dellemc/unity/docs/Release Notes.md
new file mode 100644
index 000000000..47d3fa3a5
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/Release Notes.md
@@ -0,0 +1,78 @@
+**Ansible Modules for Dell Technologies Unity**
+=========================================
+### Release Notes 1.6.0
+
+> © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell
+> and other trademarks are trademarks of Dell Inc. or its
+> subsidiaries. Other trademarks may be trademarks of their respective
+> owners.
+
+Content
+-------
+These release notes contain supplemental information about Ansible
+Modules for Dell Technologies (Dell) Unity.
+
+- Revision History
+- Product Description
+- New Features & Enhancements
+- Known Issues
+- Limitations
+- Distribution
+- Documentation
+
+Revision history
+----------------
+The table in this section lists the revision history of this document.
+
+Table 1. Revision history
+
+| Revision | Date | Description |
+|----------|----------------|---------------------------------------------------------|
+| 01 | March 2023 | Current release of Ansible Modules for Dell Unity 1.6.0 |
+
+Product Description
+-------------------
+The Ansible modules for Dell Unity are used to automate and orchestrate the deployment, configuration, and management of Dell Unity Family systems, including Unity, Unity XT, and UnityVSA. The modules manage hosts, consistency groups, filesystems, filesystem snapshots, CIFS servers, NAS servers, NFS servers, NFS exports, SMB shares, interfaces, snapshots, snapshot schedules, storage pools, tree quotas, user quotas, and volumes, and can obtain Unity system information. The options available for each capability are list, show, create, delete, and modify, with two exceptions: for NAS servers the available options are list and modify, and for CIFS and NFS servers the available options are create, list, and modify.
+
+New features & enhancements
+---------------------------
+This release has the following changes:
+
+- Support addition of hosts from the host list to NFS export in the nfs module.
+- Support enabling/disabling advanced deduplication in the volume module.
+- Add synchronous replication support for filesystems.
+
+Known issues
+------------
+Known issues in this release are listed below:
+- Filesystem creation with quota config
+  - Setting quota configuration while creating a filesystem may sometimes cause a delay in fetching the details about the quota config of the new filesystem. The module throws an error asking you to rerun the task to see the expected result.
+
+- Mapping and unmapping of hosts for a Consistency group
+ - Interoperability between Ansible Unity playbooks and Unisphere REST API is not supported for the mapping and unmapping of hosts for a consistency group.
+  > **WORKAROUND:** It is recommended to use the Ansible Unity modules consistently for all mapping and unmapping of hosts for a consistency group, instead of doing it partially through Unisphere and partially through Ansible modules.
+
+- Unmapping of LUNs from a consistency group after disabling replication fails intermittently
+  - Immediate removal/unmapping of LUNs after disabling replication may fail with the following error message, which indicates that the consistency group has snapshots.
+
+ ``` "The LUN cannot be removed from the Consistency group because there are snapshots of the Consistency group that include the selected LUN. Please remove all snapshots containing the selected LUN and try again. (Error Code:0x6000c16)" ```
+
+  > **NOTE:** It is recommended to avoid immediate removal/unmapping of LUNs after disabling replication.
+
+
+Limitations
+-----------
+There are no known limitations.
+
+Distribution
+----------------
+The software package is available for download from the [Ansible Modules
+for Unity GitHub](https://github.com/dell/ansible-unity/) page.
+
+Documentation
+-------------
+The documentation is available on [Ansible Modules for Unity GitHub](https://github.com/dell/ansible-unity/tree/1.6.0/docs)
+page. It includes the following:
+- README
+- Release Notes (this document)
+- Product Guide
diff --git a/ansible_collections/dellemc/unity/docs/SECURITY.md b/ansible_collections/dellemc/unity/docs/SECURITY.md
new file mode 100644
index 000000000..16e1acf79
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/SECURITY.md
@@ -0,0 +1,22 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+# Security policy
+
+The Ansible modules for Dell Unity repository is inspected for security vulnerabilities via Black Duck scans and static code analysis.
+
+In addition, various security checks are executed against a branch when a pull request is created or updated. Please refer to [pull request](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md#Pull-requests) for more information.
+
+## Reporting a vulnerability
+
+Have you discovered a security vulnerability in this project?
+We ask you to alert the maintainers by sending an email describing the issue, its impact, and a fix, if applicable.
+
+You can reach the Ansible modules for Dell Unity maintainers at ansible.team@dell.com.
diff --git a/ansible_collections/dellemc/unity/docs/SUPPORT.md b/ansible_collections/dellemc/unity/docs/SUPPORT.md
new file mode 100644
index 000000000..78931d078
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/SUPPORT.md
@@ -0,0 +1,12 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+## Support
+For all your support needs you can interact with us on [GitHub](https://github.com/dell/ansible-unity) by creating a [GitHub Issue](https://github.com/dell/ansible-unity/issues) or through the [Ansible Community](https://www.dell.com/community/Automation/bd-p/Automation).
diff --git a/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst b/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst
new file mode 100644
index 000000000..71b7527f2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst
@@ -0,0 +1,306 @@
+.. _cifsserver_module:
+
+
+cifsserver -- Manage CIFS server on Unity storage system
+========================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing the CIFS server on the Unity storage system includes creating a CIFS server, getting CIFS server details, and deleting a CIFS server.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ nas_server_name (optional, str, None)
+ Name of the NAS server on which CIFS server will be hosted.
+
+
+ nas_server_id (optional, str, None)
+ ID of the NAS server on which CIFS server will be hosted.
+
+
+ netbios_name (optional, str, None)
+    The computer name of the SMB server in the Windows network.
+
+
+ workgroup (optional, str, None)
+ Standalone SMB server workgroup.
+
+
+ local_password (optional, str, None)
+ Standalone SMB server administrator password.
+
+
+ domain (optional, str, None)
+ The domain name where the SMB server is registered in Active Directory.
+
+
+ domain_username (optional, str, None)
+ Active Directory domain user name.
+
+
+ domain_password (optional, str, None)
+ Active Directory domain password.
+
+
+ cifs_server_name (optional, str, None)
+ The name of the CIFS server.
+
+
+ cifs_server_id (optional, str, None)
+ The ID of the CIFS server.
+
+
+ interfaces (optional, list, None)
+ List of file IP interfaces that service CIFS protocol of SMB server.
+
+
+ unjoin_cifs_server_account (optional, bool, None)
+ Keep SMB server account unjoined in Active Directory after deletion.
+
+    ``false`` specifies that the SMB server account is kept joined after deletion.
+
+    ``true`` specifies that the SMB server account is unjoined from Active Directory before deletion.
+
+
+ state (True, str, None)
+ Define whether the CIFS server should exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+   - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create CIFS server belonging to Active Directory
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ cifs_server_name: "test_cifs"
+ domain: "ad_domain"
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "present"
+
+ - name: Get CIFS server details using CIFS server ID
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ state: "present"
+
+ - name: Get CIFS server details using NAS server name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+ - name: Delete CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ unjoin_cifs_server_account: True
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "absent"
+
+ - name: Create standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ workgroup: "ansible"
+ local_password: "Password123!"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+ - name: Get CIFS server details using netbios name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ state: "present"
+
+ - name: Delete standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_40"
+ state: "absent"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+cifs_server_details (When CIFS server exists, dict, {'description': None, 'domain': 'xxx.xxx.xxx.com', 'existed': True, 'file_interfaces': {'UnityFileInterfaceList': [{'UnityFileInterface': {'hash': -9223363258905013637, 'id': 'if_43'}}]}, 'hash': -9223363258905010379, 'health': {'UnityHealth': {'hash': 8777949765559}}, 'id': 'cifs_40', 'is_standalone': False, 'last_used_organizational_unit': 'ou=Computers,ou=Dell NAS servers', 'name': 'ansible_cifs', 'nas_server': {'UnityNasServer': {'hash': 8777949765531, 'id': 'nas_18'}}, 'netbios_name': 'ANSIBLE_CIFS', 'smb_multi_channel_supported': True, 'smb_protocol_versions': ['1.0', '2.0', '2.1', '3.0'], 'smbca_supported': True, 'workgroup': None})
+ Details of the CIFS server.
+
+
+ id (, str, )
+ Unique identifier of the CIFS server instance.
+
+
+ name (, str, )
+ User-specified name for the SMB server.
+
+
+ netbios_name (, str, )
+    Computer name of the SMB server in the Windows network.
+
+
+ description (, str, )
+ Description of the SMB server.
+
+
+ domain (, str, )
+ Domain name where SMB server is registered in Active Directory.
+
+
+ workgroup (, str, )
+ Windows network workgroup for the SMB server.
+
+
+ is_standalone (, bool, )
+ Indicates whether the SMB server is standalone.
+
+
+ nasServer (, dict, )
+ Information about the NAS server in the storage system.
+
+
+ UnityNasServer (, dict, )
+ Information about the NAS server in the storage system.
+
+
+ id (, str, )
+ Unique identifier of the NAS server instance.
+
+
+
+
+ file_interfaces (, dict, )
+ The file interfaces associated with the NAS server.
+
+
+ UnityFileInterfaceList (, list, )
+ List of file interfaces associated with the NAS server.
+
+
+ UnityFileInterface (, dict, )
+ Details of file interface associated with the NAS server.
+
+
+ id (, str, )
+ Unique identifier of the file interface.
+
+
+
+
+
+ smb_multi_channel_supported (, bool, )
+ Indicates whether the SMB 3.0+ multichannel feature is supported.
+
+
+ smb_protocol_versions (, list, )
+ Supported SMB protocols, such as 1.0, 2.0, 2.1, 3.0, and so on.
+
+
+ smbca_supported (, bool, )
+ Indicates whether the SMB server supports continuous availability.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst b/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst
new file mode 100644
index 000000000..ac5727cfd
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst
@@ -0,0 +1,506 @@
+.. _consistencygroup_module:
+
+
+consistencygroup -- Manage consistency groups on Unity storage system
+=====================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing the consistency group on the Unity storage system includes creating new consistency group, adding volumes to consistency group, removing volumes from consistency group, mapping hosts to consistency group, unmapping hosts from consistency group, renaming consistency group, modifying attributes of consistency group, enabling replication in consistency group, disabling replication in consistency group and deleting consistency group.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ cg_name (optional, str, None)
+ The name of the consistency group.
+
+ It is mandatory for the create operation.
+
+ Specify either *cg_name* or *cg_id* (but not both) for any operation.
+
+
+ cg_id (optional, str, None)
+ The ID of the consistency group.
+
+ It can be used only for get, modify, add/remove volumes, or delete operations.
+
+
+ volumes (optional, list, None)
+ This is a list of volumes.
+
+ Either the volume ID or name must be provided for adding/removing existing volumes from consistency group.
+
+ If *volumes* are given, then *vol_state* should also be specified.
+
+ Volumes cannot be added/removed from consistency group, if the consistency group or the volume has snapshots.
+
+
+ vol_id (optional, str, None)
+ The ID of the volume.
+
+
+ vol_name (optional, str, None)
+ The name of the volume.
+
+
+
+ vol_state (optional, str, None)
+ String variable, describes the state of volumes inside consistency group.
+
+ If *volumes* are given, then *vol_state* should also be specified.
+
+
+ new_cg_name (optional, str, None)
+ The new name of the consistency group, used in rename operation.
+
+
+ description (optional, str, None)
+ Description of the consistency group.
+
+
+ snap_schedule (optional, str, None)
+ Snapshot schedule assigned to the consistency group.
+
+ Specifying an empty string "" removes the existing snapshot schedule from consistency group.
+
+
+ tiering_policy (optional, str, None)
+ Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
+
+
+ hosts (optional, list, None)
+ This is a list of hosts.
+
+ Either the host ID or name must be provided for mapping/unmapping hosts for a consistency group.
+
+ If *hosts* are given, then *mapping_state* should also be specified.
+
+ Hosts cannot be mapped to a consistency group, if the consistency group has no volumes.
+
+ When a consistency group is being mapped to the host, users should not use the volume module to map the volumes in the consistency group to hosts.
+
+
+ host_id (optional, str, None)
+ The ID of the host.
+
+
+ host_name (optional, str, None)
+ The name of the host.
+
+
+
+ mapping_state (optional, str, None)
+ String variable, describes the state of hosts inside the consistency group.
+
+ If *hosts* are given, then *mapping_state* should also be specified.
+
+
+ replication_params (optional, dict, None)
+ Settings required for enabling replication.
+
+
+ destination_cg_name (optional, str, None)
+ Name of the destination consistency group.
+
+ Default value will be source consistency group name prefixed by 'DR_'.
+
+
+ replication_mode (True, str, None)
+ The replication mode.
+
+
+ rpo (optional, int, None)
+ Maximum time to wait before the system syncs the source and destination LUNs.
+
+ Option *rpo* should be specified if the *replication_mode* is ``asynchronous``.
+
+ The value should be in range of ``5`` to ``1440``.
+
+
+ replication_type (optional, str, local)
+ Type of replication.
+
+
+ remote_system (optional, dict, None)
+ Details of remote system to which the replication is being configured.
+
+ The *remote_system* option should be specified if the *replication_type* is ``remote``.
+
+
+ remote_system_host (True, str, None)
+ IP or FQDN for remote Unity unisphere Host.
+
+
+ remote_system_username (True, str, None)
+ User name of remote Unity unisphere Host.
+
+
+ remote_system_password (True, str, None)
+ Password of remote Unity unisphere Host.
+
+
+ remote_system_verifycert (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate of remote Unity unisphere Host.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ remote_system_port (optional, int, 443)
+ Port at which remote Unity unisphere is hosted.
+
+
+
+ destination_pool_name (optional, str, None)
+      Name of the pool to allocate destination LUNs.
+
+ Mutually exclusive with *destination_pool_id*.
+
+
+ destination_pool_id (optional, str, None)
+      ID of the pool to allocate destination LUNs.
+
+ Mutually exclusive with *destination_pool_name*.
+
+
+
+ replication_state (optional, str, None)
+ State of the replication.
+
+
+ state (True, str, None)
+ Define whether the consistency group should exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+   - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ cg_name: "{{cg_name}}"
+ description: "{{description}}"
+ snap_schedule: "{{snap_schedule1}}"
+ state: "present"
+
+ - name: Get details of consistency group using id
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ state: "present"
+
+ - name: Add volumes to consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_present}}"
+ state: "present"
+
+ - name: Rename consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{cg_name}}"
+ new_cg_name: "{{new_cg_name}}"
+ state: "present"
+
+ - name: Modify consistency group details
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ snap_schedule: "{{snap_schedule2}}"
+ tiering_policy: "{{tiering_policy1}}"
+ state: "present"
+
+ - name: Map hosts to a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ - host_id: "Host_511"
+ mapping_state: "mapped"
+ state: "present"
+
+ - name: Unmap hosts from a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_id: "Host_511"
+ - host_name: "10.226.198.248"
+ mapping_state: "unmapped"
+ state: "present"
+
+ - name: Remove volumes from consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_absent}}"
+ state: "present"
+
+ - name: Delete consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ state: "absent"
+
+ - name: Enable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "cg_id_1"
+ replication_params:
+ destination_cg_name: "destination_cg_1"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "dis_repl_ans_source"
+ replication_state: "disable"
+ state: "present"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+consistency_group_details (When consistency group exists, dict, {'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': None, 'cg_replication_enabled': False, 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'data_reduction_status': 'DataReductionStatusEnum.DISABLED', 'datastores': None, 'dedup_status': None, 'description': 'Ansible testing', 'esx_filesystem_block_size': None, 'esx_filesystem_major_version': None, 'existed': True, 'filesystem': None, 'hash': 8776023812033, 'health': {'UnityHealth': {'hash': 8776023811889}}, 'host_v_vol_datastore': None, 'id': 'res_7477', 'is_replication_destination': False, 'is_snap_schedule_paused': None, 'luns': None, 'metadata_size': 0, 'metadata_size_allocated': 0, 'name': 'Ansible_CG_Testing', 'per_tier_size_used': None, 'pools': None, 'relocation_policy': 'TieringPolicyEnum.MIXED', 'replication_type': 'ReplicationTypeEnum.NONE', 'size_allocated': 0, 'size_total': 0, 'size_used': None, 'snap_count': 0, 'snap_schedule': None, 'snaps_size_allocated': 0, 'snaps_size_total': 0, 'snapshots': [], 'thin_status': 'ThinStatusEnum.FALSE', 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None})
+ Details of the consistency group.
+
+
+ id (, str, )
+ The system ID given to the consistency group.
+
+
+ relocation_policy (, str, )
+ FAST VP tiering policy for the consistency group.
+
+
+ cg_replication_enabled (, bool, )
+    Whether or not the replication is enabled.
+
+
+ snap_schedule (, dict, )
+ Snapshot schedule applied to consistency group.
+
+
+ UnitySnapSchedule (, dict, )
+ Snapshot schedule applied to consistency group.
+
+
+ id (, str, )
+ The system ID given to the snapshot schedule.
+
+
+ name (, str, )
+ The name of the snapshot schedule.
+
+
+
+
+ luns (, dict, )
+ Details of volumes part of consistency group.
+
+
+ UnityLunList (, list, )
+ List of volumes part of consistency group.
+
+
+ UnityLun (, dict, )
+ Detail of volume.
+
+
+ id (, str, )
+ The system ID given to volume.
+
+
+ name (, str, )
+ The name of the volume.
+
+
+
+
+
+ snapshots (, list, )
+ List of snapshots of consistency group.
+
+
+ name (, str, )
+ Name of the snapshot.
+
+
+ creation_time (, str, )
+ Date and time on which the snapshot was taken.
+
+
+ expirationTime (, str, )
+ Date and time after which the snapshot will expire.
+
+
+ storageResource (, dict, )
+ Storage resource for which the snapshot was taken.
+
+
+ UnityStorageResource (, dict, )
+ Details of the storage resource.
+
+
+ id (, str, )
+ The id of the storage resource.
+
+
+
+
+
+ block_host_access (, dict, )
+ Details of hosts mapped to the consistency group.
+
+
+ UnityBlockHostAccessList (, list, )
+ List of hosts mapped to consistency group.
+
+
+ UnityBlockHostAccess (, dict, )
+ Details of host.
+
+
+ id (, str, )
+ The ID of the host.
+
+
+ name (, str, )
+ The name of the host.
+
+
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/filesystem.rst b/ansible_collections/dellemc/unity/docs/modules/filesystem.rst
new file mode 100644
index 000000000..81881dfbb
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/filesystem.rst
@@ -0,0 +1,643 @@
+.. _filesystem_module:
+
+
+filesystem -- Manage filesystem on Unity storage system
+=======================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing filesystems on the Unity storage system includes creating a new filesystem, modifying the snapschedule attribute of a filesystem, modifying filesystem attributes, displaying filesystem details, displaying filesystem snapshots, displaying the filesystem snapschedule, deleting the snapschedule associated with a filesystem, deleting a filesystem, creating a new filesystem with quota configuration, and enabling, modifying, and disabling replication.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ filesystem_name (optional, str, None)
+ The name of the filesystem. Mandatory only for the create operation. All the operations are supported through *filesystem_name*.
+
+ It is mutually exclusive with *filesystem_id*.
+
+
+ filesystem_id (optional, str, None)
+ The id of the filesystem.
+
+ It can be used only for get, modify, or delete operations.
+
+ It is mutually exclusive with *filesystem_name*.
+
+
+ pool_name (optional, str, None)
+ This is the name of the pool where the filesystem will be created.
+
+ Either the *pool_name* or *pool_id* must be provided to create a new filesystem.
+
+
+ pool_id (optional, str, None)
+ This is the ID of the pool where the filesystem will be created.
+
+ Either the *pool_name* or *pool_id* must be provided to create a new filesystem.
+
+
+ size (optional, int, None)
+ The size of the filesystem.
+
+
+ cap_unit (optional, str, None)
+ The unit of the filesystem size. It defaults to ``GB``, if not specified.
+
+
+ nas_server_name (optional, str, None)
+ Name of the NAS server on which filesystem will be hosted.
+
+
+ nas_server_id (optional, str, None)
+ ID of the NAS server on which filesystem will be hosted.
+
+
+ supported_protocols (optional, str, None)
+ Protocols supported by the file system.
+
+ It will be overridden by NAS server configuration if NAS Server is ``Multiprotocol``.
+
+
+ description (optional, str, None)
+ Description about the filesystem.
+
+ Description can be removed by passing empty string ("").
+
+
+ smb_properties (optional, dict, None)
+    Advanced settings for SMB. It contains optional candidate variables.
+
+
+ is_smb_sync_writes_enabled (optional, bool, None)
+ Indicates whether the synchronous writes option is enabled on the file system.
+
+
+ is_smb_notify_on_access_enabled (optional, bool, None)
+ Indicates whether notifications of changes to directory file structure are enabled.
+
+
+ is_smb_op_locks_enabled (optional, bool, None)
+ Indicates whether opportunistic file locking is enabled on the file system.
+
+
+ is_smb_notify_on_write_enabled (optional, bool, None)
+ Indicates whether file write notifications are enabled on the file system.
+
+
+ smb_notify_on_change_dir_depth (optional, int, None)
+ Integer variable, determines the lowest directory level to which the enabled notifications apply.
+
+ Minimum value is ``1``.
+
+
+
+ data_reduction (optional, bool, None)
+ Boolean variable, specifies whether or not to enable compression. Compression is supported only for thin filesystem.
+
+
+ is_thin (optional, bool, None)
+ Boolean variable, specifies whether or not it is a thin filesystem.
+
+
+ access_policy (optional, str, None)
+ Access policy of a filesystem.
+
+
+ locking_policy (optional, str, None)
+ File system locking policies. These policy choices control whether the NFSv4 range locks must be honored.
+
+
+ tiering_policy (optional, str, None)
+ Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
+
+
+ quota_config (optional, dict, None)
+ Configuration for quota management. It contains optional parameters.
+
+
+ grace_period (optional, int, None)
+ Grace period set in quota configuration after soft limit is reached.
+
+      If *grace_period* is not set during creation of the filesystem, it will be set to ``7 days`` by default.
+
+
+ grace_period_unit (optional, str, None)
+ Unit of grace period.
+
+ Default unit is ``days``.
+
+
+ default_hard_limit (optional, int, None)
+ Default hard limit for user quotas and tree quotas.
+
+      If *default_hard_limit* is not set during creation of the filesystem, it will be set to ``0B`` by default.
+
+
+ default_soft_limit (optional, int, None)
+ Default soft limit for user quotas and tree quotas.
+
+      If *default_soft_limit* is not set during creation of the filesystem, it will be set to ``0B`` by default.
+
+
+ is_user_quota_enabled (optional, bool, None)
+ Indicates whether the user quota is enabled.
+
+      If *is_user_quota_enabled* is not set during creation of the filesystem, it will be set to ``false`` by default.
+
+ Parameters *is_user_quota_enabled* and *quota_policy* are mutually exclusive.
+
+
+ quota_policy (optional, str, None)
+ Quota policy set in quota configuration.
+
+      If *quota_policy* is not set during creation of the filesystem, it will be set to ``FILE_SIZE`` by default.
+
+ Parameters *is_user_quota_enabled* and *quota_policy* are mutually exclusive.
+
+
+ cap_unit (optional, str, None)
+ Unit of *default_soft_limit* and *default_hard_limit* size.
+
+ Default unit is ``GB``.
+
+
+
+ state (True, str, None)
+ State variable to determine whether filesystem will exist or not.
+
+
+ snap_schedule_name (optional, str, None)
+ This is the name of an existing snapshot schedule which is to be associated with the filesystem.
+
+ This is mutually exclusive with *snapshot_schedule_id*.
+
+
+ snap_schedule_id (optional, str, None)
+ This is the id of an existing snapshot schedule which is to be associated with the filesystem.
+
+ This is mutually exclusive with *snapshot_schedule_name*.
+
+
+ replication_params (optional, dict, None)
+ Settings required for enabling or modifying replication.
+
+
+ replication_name (optional, str, None)
+ Name of the replication session.
+
+
+ new_replication_name (optional, str, None)
+ Replication name to rename the session to.
+
+
+ replication_mode (optional, str, None)
+ The replication mode.
+
+ This is a mandatory field while creating a replication session.
+
+
+ rpo (optional, int, None)
+ Maximum time to wait before the system syncs the source and destination LUNs.
+
+ The *rpo* option should be specified if the *replication_mode* is ``asynchronous``.
+
+ The value should be in range of ``5`` to ``1440`` for ``asynchronous``, ``0`` for ``synchronous`` and ``-1`` for ``manual``.
+
+
+ replication_type (optional, str, None)
+ Type of replication.
+
+
+ remote_system (optional, dict, None)
+ Details of remote system to which the replication is being configured.
+
+ The *remote_system* option should be specified if the *replication_type* is ``remote``.
+
+
+ remote_system_host (True, str, None)
+ IP or FQDN for remote Unity unisphere Host.
+
+
+ remote_system_username (True, str, None)
+ User name of remote Unity unisphere Host.
+
+
+ remote_system_password (True, str, None)
+ Password of remote Unity unisphere Host.
+
+
+ remote_system_verifycert (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate of remote Unity unisphere Host.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ remote_system_port (optional, int, 443)
+ Port at which remote Unity unisphere is hosted.
+
+
+
+ destination_pool_id (optional, str, None)
+ ID of pool to allocate destination filesystem.
+
+
+ destination_pool_name (optional, str, None)
+ Name of pool to allocate destination filesystem.
+
+
+
+ replication_state (optional, str, None)
+ State of the replication.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+   - SMB shares, NFS exports, and snapshots associated with a filesystem need to be deleted prior to deleting the filesystem.
+   - The *quota_config* parameter can be used to update the default hard limit and soft limit values to limit the maximum space that can be used. By default, both are set to 0 during filesystem creation, which means unlimited.
+ - The *check_mode* is not supported.
+   - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create FileSystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ state: "present"
+
+ - name: Create FileSystem with quota configuration
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ quota_config:
+ grace_period: 8
+ grace_period_unit: "days"
+ default_soft_limit: 10
+ is_user_quota_enabled: False
+ state: "present"
+
+ - name: Expand FileSystem size
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ size: 10
+ state: "present"
+
+ - name: Modify FileSystem smb_properties
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ smb_properties:
+ is_smb_op_locks_enabled: True
+ smb_notify_on_change_dir_depth: 5
+ is_smb_notify_on_access_enabled: True
+ state: "present"
+
+ - name: Modify FileSystem Snap Schedule
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_141"
+ snap_schedule_id: "{{snap_schedule_id}}"
+ state: "{{state_present}}"
+
+ - name: Get details of FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "present"
+
+ - name: Delete a FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "absent"
+
+ - name: Enable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ replication_type: "remote"
+ replication_mode: "asynchronous"
+ rpo: 60
+ remote_system:
+ remote_system_host: '0.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication by specifying replication_name on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+filesystem_details (When filesystem exists, dict, {'access_policy': 'AccessPolicyEnum.UNIX', 'cifs_notify_on_change_dir_depth': 512, 'cifs_share': None, 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'description': '', 'existed': True, 'folder_rename_policy': 'FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN', 'format': 'FSFormatEnum.UFS64', 'hash': 8735427610152, 'health': {'UnityHealth': {'hash': 8735427614928}}, 'host_io_size': 'HostIOSizeEnum.GENERAL_8K', 'id': 'fs_65916', 'is_advanced_dedup_enabled': False, 'is_cifs_notify_on_access_enabled': False, 'is_cifs_notify_on_write_enabled': False, 'is_cifs_op_locks_enabled': False, 'is_cifs_sync_writes_enabled': False, 'is_data_reduction_enabled': False, 'is_read_only': False, 'is_smbca': False, 'is_thin_enabled': True, 'locking_policy': 'FSLockingPolicyEnum.MANDATORY', 'metadata_size': 11274289152, 'metadata_size_allocated': 4294967296, 'min_size_allocated': 0, 'name': 'test_fs', 'nas_server': {'id': 'nas_18', 'name': 'test_nas1'}, 'nfs_share': None, 'per_tier_size_used': [6979321856, 0, 0], 'pool': {'id': 'pool_7', 'name': 'pool 7'}, 'pool_full_policy': 'ResourcePoolFullPolicyEnum.FAIL_WRITES', 'quota_config': {'default_hard_limit': '0B', 'default_soft_limit': '0B', 'grace_period': '7.0 days', 'id': 'quotaconfig_171798760421_0', 'is_user_quota_enabled': False, 'quota_policy': 'QuotaPolicyEnum.FILE_SIZE'}, 'replication_sessions': {'current_transfer_est_remain_time': 0, 'id': '***', 'last_sync_time': '2022-05-12 11:20:38+00:00', 'local_role': 'ReplicationSessionReplicationRoleEnum.SOURCE', 'max_time_out_of_sync': 60, 'members': None, 'name': 'local_repl_new', 'network_status': 'ReplicationSessionNetworkStatusEnum.OK', 'remote_system': {'UnityRemoteSystem': {'hash': 8735426929707}}, 'replication_resource_type': 'ReplicationEndpointResourceTypeEnum.FILESYSTEM', 'src_resource_id': 'res_66444', 'src_status': 'ReplicationSessionStatusEnum.OK', 'status': 'ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED', 'sync_progress': 0, 'sync_state': 'ReplicationSessionSyncStateEnum.IDLE'}, 'size_allocated': 283148288, 'size_allocated_total': 4578148352, 'size_preallocated': 2401173504, 'size_total': 10737418240, 'size_total_with_unit': '10.0 GB', 'size_used': 1620312064, 'snap_count': 2, 'snaps_size': 21474869248, 'snaps_size_allocated': 32768, 'snapshots': [], 'supported_protocols': 'FSSupportedProtocolEnum.NFS', 'tiering_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'type': 'FilesystemTypeEnum.FILESYSTEM'})
+ Details of the filesystem.
+
+
+ id (, str, )
+ The system generated ID given to the filesystem.
+
+
+ name (, str, )
+ Name of the filesystem.
+
+
+ description (, str, )
+ Description about the filesystem.
+
+
+ is_data_reduction_enabled (, bool, )
+ Whether or not compression is enabled on this filesystem.
+
+
+ size_total_with_unit (, str, )
+ Size of the filesystem with actual unit.
+
+
+ tiering_policy (, str, )
+ Tiering policy applied to this filesystem.
+
+
+ is_cifs_notify_on_access_enabled (, bool, )
+ Indicates whether the system generates a notification when a user accesses the file system.
+
+
+ is_cifs_notify_on_write_enabled (, bool, )
+ Indicates whether the system generates a notification when the file system is written to.
+
+
+ is_cifs_op_locks_enabled (, bool, )
+ Indicates whether opportunistic file locks are enabled for the file system.
+
+
+ is_cifs_sync_writes_enabled (, bool, )
+ Indicates whether the CIFS synchronous writes option is enabled for the file system.
+
+
+ cifs_notify_on_change_dir_depth (, int, )
+ Indicates the lowest directory level to which the enabled notifications apply, if any.
+
+
+ pool (, dict, )
+ The pool in which this filesystem is allocated.
+
+
+ id (, str, )
+ The system ID given to the pool.
+
+
+ name (, str, )
+ The name of the storage pool.
+
+
+
+ nas_server (, dict, )
+ The NAS Server details on which this filesystem is hosted.
+
+
+ id (, str, )
+ The system ID given to the NAS Server.
+
+
+ name (, str, )
+ The name of the NAS Server.
+
+
+
+ snapshots (, list, )
+ The list of snapshots of this filesystem.
+
+
+ id (, str, )
+ The system ID given to the filesystem snapshot.
+
+
+ name (, str, )
+ The name of the filesystem snapshot.
+
+
+
+ is_thin_enabled (, bool, )
+ Indicates whether thin provisioning is enabled for this filesystem.
+
+
+ snap_schedule_id (, str, )
+ Indicates the id of the snap schedule associated with the filesystem.
+
+
+ snap_schedule_name (, str, )
+ Indicates the name of the snap schedule associated with the filesystem.
+
+
+ quota_config (, dict, )
+ Details of quota configuration of the filesystem created.
+
+
+ grace_period (, str, )
+ Grace period set in quota configuration after soft limit is reached.
+
+
+ default_hard_limit (, int, )
+ Default hard limit for user quotas and tree quotas.
+
+
+ default_soft_limit (, int, )
+ Default soft limit for user quotas and tree quotas.
+
+
+ is_user_quota_enabled (, bool, )
+ Indicates whether the user quota is enabled.
+
+
+ quota_policy (, str, )
+ Quota policy set in quota configuration.
+
+
+
+ replication_sessions (, dict, )
+ List of replication sessions if replication is enabled.
+
+
+ id (, str, )
+ ID of the replication session.
+
+
+ name (, str, )
+ Name of the replication session.
+
+
+ remote_system (, dict, )
+ Details of the remote system.
+
+
+ id (, str, )
+ ID of the remote system.
+
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst b/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst
new file mode 100644
index 000000000..c75f81611
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst
@@ -0,0 +1,341 @@
+.. _filesystem_snapshot_module:
+
+
+filesystem_snapshot -- Manage filesystem snapshot on the Unity storage system
+=============================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+ Managing filesystem snapshots on the Unity storage system includes creating, getting, modifying, and deleting filesystem snapshots.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ snapshot_name (optional, str, None)
+ The name of the filesystem snapshot.
+
+ Mandatory parameter for creating a filesystem snapshot.
+
+ For all other operations either *snapshot_name* or *snapshot_id* is required.
+
+
+ snapshot_id (optional, str, None)
+ During creation, *snapshot_id* is auto-generated.
+
+ For all other operations either *snapshot_id* or *snapshot_name* is required.
+
+
+ filesystem_name (optional, str, None)
+ The name of the Filesystem for which snapshot is created.
+
+ For creation of filesystem snapshot either *filesystem_name* or *filesystem_id* is required.
+
+ Not required for other operations.
+
+
+ filesystem_id (optional, str, None)
+ The ID of the Filesystem for which snapshot is created.
+
+ For creation of filesystem snapshot either *filesystem_id* or *filesystem_name* is required.
+
+ Not required for other operations.
+
+
+ nas_server_name (optional, str, None)
+ The name of the NAS server in which the Filesystem is created.
+
+ For creation of filesystem snapshot either *nas_server_name* or *nas_server_id* is required.
+
+ Not required for other operations.
+
+
+ nas_server_id (optional, str, None)
+ The ID of the NAS server in which the Filesystem is created.
+
+ For creation of filesystem snapshot either *nas_server_id* or *nas_server_name* is required.
+
+ Not required for other operations.
+
+
+ auto_delete (optional, bool, None)
+ This option specifies whether or not the filesystem snapshot will be automatically deleted.
+
+ If set to ``true``, the filesystem snapshot will expire based on the pool auto deletion policy.
+
+ If set to ``false``, the filesystem snapshot will not be auto deleted based on the pool auto deletion policy.
+
+ Option *auto_delete* cannot be set to ``true`` if *expiry_time* is specified.
+
+ If neither *auto_delete* nor *expiry_time* is mentioned during creation, then the filesystem snapshot will be created with *auto_delete* set to ``true``.
+
+ Once *expiry_time* is set, the filesystem snapshot can no longer be assigned to the auto delete policy.
+
+
+ expiry_time (optional, str, None)
+ This option is for specifying the date and time after which the filesystem snapshot will expire.
+
+ The time is to be mentioned in UTC timezone.
+
+ The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+
+
+ description (optional, str, None)
+ The additional information about the filesystem snapshot can be provided using this option.
+
+ The description can be removed by passing an empty string.
+
+
+ fs_access_type (optional, str, None)
+ Access type of the filesystem snapshot.
+
+ Required only during creation of filesystem snapshot.
+
+ If not given, snapshot's access type will be ``Checkpoint``.
+
+
+ state (True, str, None)
+ The state option is used to mention the existence of the filesystem snapshot.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - A filesystem snapshot cannot be deleted if it has an NFS or SMB share.
+ - The *check_mode* is not supported.
+ - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create Filesystem Snapshot
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ filesystem_name: "ansible_test_FS"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ auto_delete: True
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Create Filesystem Snapshot with expiry time
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap_1"
+ filesystem_name: "ansible_test_FS_1"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ expiry_time: "04/15/2021 2:30"
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ description: "Description updated"
+ auto_delete: False
+ expiry_time: "04/15/2021 5:30"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ expiry_time: "04/18/2021 8:30"
+ state: "present"
+
+ - name: Delete Filesystem Snapshot using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "absent"
+
+ - name: Delete Filesystem Snapshot using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "absent"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+filesystem_snapshot_details (When filesystem snapshot exists, dict, {'access_type': 'FilesystemSnapAccessTypeEnum.CHECKPOINT', 'attached_wwn': None, 'creation_time': '2022-10-21 04:42:53.951000+00:00', 'creator_schedule': None, 'creator_type': 'SnapCreatorTypeEnum.USER_CUSTOM', 'creator_user': {'id': 'user_admin'}, 'description': 'Created using playbook', 'existed': True, 'expiration_time': None, 'filesystem_id': 'fs_137', 'filesystem_name': 'test', 'hash': 8739894572587, 'host_access': None, 'id': '171798721695', 'io_limit_policy': None, 'is_auto_delete': True, 'is_modifiable': False, 'is_modified': False, 'is_read_only': True, 'is_system_snap': False, 'last_writable_time': None, 'lun': None, 'name': 'test_FS_snap_1', 'nas_server_id': 'nas_1', 'nas_server_name': 'lglad072', 'parent_snap': None, 'size': 107374182400, 'snap_group': None, 'state': 'SnapStateEnum.READY'})
+ Details of the filesystem snapshot.
+
+
+ access_type (, str, )
+ Access type of filesystem snapshot.
+
+
+ attached_wwn (, str, )
+ Attached WWN details.
+
+
+ creation_time (, str, )
+ Creation time of filesystem snapshot.
+
+
+ creator_schedule (, str, )
+ Creator schedule of filesystem snapshot.
+
+
+ creator_type (, str, )
+ Creator type for filesystem snapshot.
+
+
+ creator_user (, str, )
+ Creator user for filesystem snapshot.
+
+
+ description (, str, )
+ Description of the filesystem snapshot.
+
+
+ expiration_time (, str, )
+ Date and time after which the filesystem snapshot will expire.
+
+
+ is_auto_delete (, bool, )
+ Indicates whether the filesystem snapshot is auto deleted or not.
+
+
+ id (, str, )
+ Unique identifier of the filesystem snapshot instance.
+
+
+ name (, str, )
+ The name of the filesystem snapshot.
+
+
+ size (, int, )
+ Size of the filesystem snapshot.
+
+
+ filesystem_name (, str, )
+ Name of the filesystem for which the snapshot exists.
+
+
+ filesystem_id (, str, )
+ Id of the filesystem for which the snapshot exists.
+
+
+ nas_server_name (, str, )
+ Name of the NAS server on which filesystem exists.
+
+
+ nas_server_id (, str, )
+ Id of the NAS server on which filesystem exists.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/host.rst b/ansible_collections/dellemc/unity/docs/modules/host.rst
new file mode 100644
index 000000000..b0afe55b9
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/host.rst
@@ -0,0 +1,333 @@
+.. _host_module:
+
+
+host -- Manage Host operations on Unity
+=======================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+ The Host module supports the following operations: creation of a host, addition of initiators to a host, removal of initiators from a host, modification of host attributes, getting details of a host, deletion of a host, addition of a network address to a host, and removal of a network address from a host.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ host_name (optional, str, None)
+ Name of the host.
+
+ Mandatory for host creation.
+
+
+ host_id (optional, str, None)
+ Unique identifier of the host.
+
+ Host ID is auto-generated during creation.
+
+ Except create, all other operations require either *host_id* or *host_name*.
+
+
+ description (optional, str, None)
+ Host description.
+
+
+ host_os (optional, str, None)
+ Operating system running on the host.
+
+
+ new_host_name (optional, str, None)
+ New name for the host.
+
+ Only required in rename host operation.
+
+
+ initiators (optional, list, None)
+ List of initiators to be added/removed to/from host.
+
+
+ initiator_state (optional, str, None)
+ State of the initiator.
+
+
+ network_address (optional, str, None)
+ Network address to be added/removed to/from the host.
+
+ Enter a valid IPv4 address or host name.
+
+
+ network_address_state (optional, str, None)
+ State of the Network address.
+
+
+ state (True, str, None)
+ State of the host.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create empty Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host"
+ host_os: "Linux"
+ description: "ansible-test-host"
+ state: "present"
+
+ - name: Create Host with Initiators
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-1"
+ host_os: "Linux"
+ description: "ansible-test-host-1"
+ initiators:
+ - "iqn.1994-05.com.redhat:c38e6e8cfd81"
+ - "20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D"
+ initiator_state: "present-in-host"
+ state: "present"
+
+ - name: Modify Host using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ new_host_name: "ansible-test-host-2"
+ host_os: "Mac OS"
+ description: "Ansible tesing purpose"
+ state: "present"
+
+ - name: Add Initiators to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ initiators:
+ - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C"
+ initiator_state: "present-in-host"
+ state: "present"
+
+ - name: Get Host details using host_name
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "present"
+
+ - name: Get Host details using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ state: "present"
+
+ - name: Delete Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "absent"
+
+ - name: Add network address to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "present-in-host"
+ state: "present"
+
+ - name: Delete network address from Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "absent-in-host"
+ state: "present"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+host_details (When host exists., dict, {'auto_manage_type': 'HostManageEnum.UNKNOWN', 'datastores': None, 'description': 'ansible-test-host-1', 'existed': True, 'fc_host_initiators': [{'id': 'HostInitiator_1', 'name': 'HostName_1', 'paths': [{'id': 'HostInitiator_1_Id1', 'is_logged_in': True}, {'id': 'HostInitiator_1_Id2', 'is_logged_in': True}]}], 'hash': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'health': {'UnityHealth': {'hash': 8764429420954}}, 'host_container': None, 'host_luns': [], 'host_polled_uuid': None, 'host_pushed_uuid': None, 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_2198', 'iscsi_host_initiators': [{'id': 'HostInitiator_2', 'name': 'HostName_2', 'paths': [{'id': 'HostInitiator_2_Id1', 'is_logged_in': True}, {'id': 'HostInitiator_2_Id2', 'is_logged_in': True}]}], 'last_poll_time': None, 'name': 'ansible-test-host-1', 'network_addresses': [], 'os_type': 'Linux', 'registration_type': None, 'storage_resources': None, 'tenant': None, 'type': 'HostTypeEnum.HOST_MANUAL', 'vms': None})
+ Details of the host.
+
+
+ id (, str, )
+ The system ID given to the host.
+
+
+ name (, str, )
+ The name of the host.
+
+
+ description (, str, )
+ Description about the host.
+
+
+ fc_host_initiators (, list, )
+ Details of the FC initiators associated with the host.
+
+
+ id (, str, )
+ Unique identifier of the FC initiator path.
+
+
+ name (, str, )
+ FC Qualified Name (WWN) of the initiator.
+
+
+ paths (, list, )
+ Details of the paths associated with the FC initiator.
+
+
+ id (, str, )
+ Unique identifier of the path.
+
+
+ is_logged_in (, bool, )
+ Indicates whether the host initiator is logged into the storage system.
+
+
+
+
+ iscsi_host_initiators (, list, )
+ Details of the ISCSI initiators associated with the host.
+
+
+ id (, str, )
+ Unique identifier of the ISCSI initiator.
+
+
+ name (, str, )
+ ISCSI Qualified Name (IQN) of the initiator.
+
+
+ paths (, list, )
+ Details of the paths associated with the ISCSI initiator.
+
+
+ id (, str, )
+ Unique identifier of the path.
+
+
+ is_logged_in (, bool, )
+ Indicates whether the host initiator is logged into the storage system.
+
+
+
+
+ network_addresses (, list, )
+ List of network addresses mapped to the host.
+
+
+ os_type (, str, )
+ Operating system running on the host.
+
+
+ type (, str, )
+ HostTypeEnum of the host.
+
+
+ host_luns (, list, )
+ Details of LUNs attached to the host.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/info.rst b/ansible_collections/dellemc/unity/docs/modules/info.rst
new file mode 100644
index 000000000..7b1ef111c
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/info.rst
@@ -0,0 +1,582 @@
+.. _info_module:
+
+
+info -- Gathering information about Unity
+=========================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+ Gathering information about the Unity storage system includes getting details of the Unity array, and lists of hosts, FC initiators, iSCSI initiators, consistency groups, storage pools, volumes, snapshot schedules, NAS servers, file systems, snapshots, SMB shares, NFS exports, user quotas, quota trees, NFS servers, CIFS servers, Ethernet ports, and file interfaces in the Unity array.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ gather_subset (optional, list, None)
+ List of string variables to specify the Unity storage system entities for which information is required.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get detailed list of Unity entities
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+ - fc_initiator
+ - iscsi_initiator
+ - cg
+ - storage_pool
+ - vol
+ - snapshot_schedule
+ - nas_server
+ - file_system
+ - snapshot
+ - nfs_export
+ - smb_share
+ - user_quota
+ - tree_quota
+ - disk_group
+ - nfs_server
+ - cifs_server
+ - ethernet_port
+ - file_interface
+
+ - name: Get information of Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+
+ - name: Get list of hosts on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+
+ - name: Get list of FC initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - fc_initiator
+
+ - name: Get list of ISCSI initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - iscsi_initiator
+
+ - name: Get list of consistency groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cg
+
+ - name: Get list of storage pools on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - storage_pool
+
+ - name: Get list of volumes on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - vol
+
+ - name: Get list of snapshot schedules on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot_schedule
+
+ - name: Get list of NAS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nas_server
+
+ - name: Get list of File Systems on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_system
+
+ - name: Get list of Snapshots on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot
+
+ - name: Get list of NFS exports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_export
+
+ - name: Get list of SMB shares on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - smb_share
+
+ - name: Get list of user quotas on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - user_quota
+
+ - name: Get list of quota trees on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - tree_quota
+
+ - name: Get list of disk groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - disk_group
+
+ - name: Get list of NFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_server
+
+ - name: Get list of CIFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cifs_server
+
+ - name: Get list of ethernet ports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - ethernet_port
+
+ - name: Get list of file interfaces on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_interface
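+
+ # Illustrative addition (not in the original examples): registering the
+ # module output and reading one of the return values documented below.
+ # This is standard Ansible register/debug usage; it assumes the documented
+ # 'Hosts' key is returned at the top level of the registered result.
+ - name: Get list of hosts and register the result
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+ register: subset_result
+
+ - name: Display the host list
+ ansible.builtin.debug:
+ var: subset_result.Hosts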
+
+
+
+Return Values
+-------------
+
+Array_Details (always, dict, {'api_version': '12.0', 'earliest_api_version': '4.0', 'existed': True, 'hash': 8766644083532, 'id': '0', 'model': 'Unity 480', 'name': 'APM00213404195', 'software_version': '5.2.1'})
+ Details of the Unity Array.
+
+
+ api_version (, str, )
+ The current api version of the Unity Array.
+
+
+ earliest_api_version (, str, )
+ The earliest api version of the Unity Array.
+
+
+ model (, str, )
+ The model of the Unity Array.
+
+
+ name (, str, )
+ The name of the Unity Array.
+
+
+ software_version (, str, )
+ The software version of the Unity Array.
+
+
+
+Hosts (When hosts exist., list, [{'auto_manage_type': 'HostManageEnum.UNKNOWN', 'datastores': None, 'description': '', 'existed': True, 'fc_host_initiators': None, 'hash': 8762200072289, 'health': {'UnityHealth': {'hash': 8762200072352}}, 'host_container': None, 'host_ip_ports': {'UnityHostIpPortList': [{'UnityHostIpPort': {'hash': 8762200072361}}]}, 'host_luns': None, 'host_polled_uuid': None, 'host_pushed_uuid': None, 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_2191', 'iscsi_host_initiators': None, 'last_poll_time': None, 'name': '10.225.2.153', 'os_type': 'Linux', 'registration_type': None, 'storage_resources': None, 'tenant': None, 'type': 'HostTypeEnum.HOST_MANUAL', 'vms': None}])
+ Details of the hosts.
+
+
+ id (, str, )
+ The ID of the host.
+
+
+ name (, str, )
+ The name of the host.
+
+
+
+FC_initiators (When FC initiator exist., list, [{'WWN': '20:00:00:0E:1E:E9:B8:FC:21:00:00:0E:1E:E9:B8:FC', 'id': 'HostInitiator_3'}, {'WWN': '20:00:00:0E:1E:E9:B8:F7:21:00:00:0E:1E:E9:B8:F7', 'id': 'HostInitiator_4'}])
+ Details of the FC initiators.
+
+
+ WWN (, str, )
+ The WWN of the FC initiator.
+
+
+ id (, str, )
+ The id of the FC initiator.
+
+
+
+ISCSI_initiators (When ISCSI initiators exist., list, [{'IQN': 'iqn.1994-05.com.redhat:634d768090f', 'id': 'HostInitiator_1'}, {'IQN': 'iqn.1994-05.com.redhat:2835ba62cc6d', 'id': 'HostInitiator_2'}])
+ Details of the ISCSI initiators.
+
+
+ IQN (, str, )
+ The IQN of the ISCSI initiator.
+
+
+ id (, str, )
+ The id of the ISCSI initiator.
+
+
+
+Consistency_Groups (When Consistency Groups exist., list, [{'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': {'UnityBlockHostAccessList': [{'UnityBlockHostAccess': {'hash': 8745385821206}}, {'UnityBlockHostAccess': {'hash': 8745386530115}}, {'UnityBlockHostAccess': {'hash': 8745386530124}}]}, 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'data_reduction_status': 'DataReductionStatusEnum.DISABLED', 'datastores': None, 'dedup_status': None, 'description': 'CG has created with all parametres.', 'esx_filesystem_block_size': None, 'esx_filesystem_major_version': None, 'existed': True, 'filesystem': None, 'hash': 8745385801328, 'health': {'UnityHealth': {'hash': 8745386647098}}, 'host_v_vol_datastore': None, 'id': 'res_93', 'is_replication_destination': False, 'is_snap_schedule_paused': False, 'luns': {'UnityLunList': [{'UnityLun': {'hash': 8745389830024, 'id': 'sv_64'}}, {'UnityLun': {'hash': 8745386526751, 'id': 'sv_63'}}]}, 'metadata_size': 8858370048, 'metadata_size_allocated': 7516192768, 'name': 'CG1_Ansible_Test_SS', 'per_tier_size_used': [11811160064, 0, 0], 'pools': {'UnityPoolList': [{'UnityPool': {'hash': 8745386552375, 'id': 'pool_3'}}]}, 'relocation_policy': 'TieringPolicyEnum.AUTOTIER', 'replication_type': 'ReplicationTypeEnum.NONE', 'size_allocated': 99418112, 'size_total': 268435456000, 'size_used': None, 'snap_count': 1, 'snap_schedule': {'UnitySnapSchedule': {'hash': 8745386550224, 'id': 'snapSch_66'}}, 'snaps_size_allocated': 8888320, 'snaps_size_total': 108675072, 'thin_status': 'ThinStatusEnum.TRUE', 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None}])
+ Details of the Consistency Groups.
+
+
+ id (, str, )
+ The ID of the Consistency Group.
+
+
+ name (, str, )
+ The name of the Consistency Group.
+
+
+
+Storage_Pools (When Storage Pools exist., list, [{'alert_threshold': 70, 'creation_time': '2021-10-18 12:45:12+00:00', 'description': '', 'existed': True, 'harvest_state': 'UsageHarvestStateEnum.PAUSED_COULD_NOT_REACH_HWM', 'hash': 8741501012399, 'health': {'UnityHealth': {'hash': 8741501012363}}, 'id': 'pool_2', 'is_all_flash': False, 'is_empty': False, 'is_fast_cache_enabled': False, 'is_harvest_enabled': True, 'is_snap_harvest_enabled': False, 'metadata_size_subscribed': 312458870784, 'metadata_size_used': 244544700416, 'name': 'fastVP_pool', 'object_id': 12884901891, 'pool_fast_vp': {'UnityPoolFastVp': {'hash': 8741501228023}}, 'pool_space_harvest_high_threshold': 95.0, 'pool_space_harvest_low_threshold': 85.0, 'pool_type': 'StoragePoolTypeEnum.TRADITIONAL', 'raid_type': 'RaidTypeEnum.RAID5', 'rebalance_progress': None, 'size_free': 2709855928320, 'size_subscribed': 2499805044736, 'size_total': 3291018690560, 'size_used': 455513956352, 'snap_size_subscribed': 139720515584, 'snap_size_used': 66002944, 'snap_space_harvest_high_threshold': 25.0, 'snap_space_harvest_low_threshold': 20.0, 'tiers': {'UnityPoolTierList': [{'UnityPoolTier': {'hash': 8741500996410}}, {'UnityPoolTier': {'hash': 8741501009430}}, {'UnityPoolTier': {'hash': 8741501009508}}]}}])
+ Details of the Storage Pools.
+
+
+ id (, str, )
+ The ID of the Storage Pool.
+
+
+ name (, str, )
+ The name of the Storage Pool.
+
+
+
+Volumes (When Volumes exist., list, [{'current_node': 'NodeEnum.SPB', 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'default_node': 'NodeEnum.SPB', 'description': None, 'effective_io_limit_max_iops': None, 'effective_io_limit_max_kbps': None, 'existed': True, 'family_base_lun': {'UnityLun': {'hash': 8774260820794, 'id': 'sv_27'}}, 'family_clone_count': 0, 'hash': 8774260854260, 'health': {'UnityHealth': {'hash': 8774260812499}}, 'host_access': {'UnityBlockHostAccessList': [{'UnityBlockHostAccess': {'hash': 8774260826387}}]}, 'id': 'sv_27', 'io_limit_policy': None, 'is_advanced_dedup_enabled': False, 'is_compression_enabled': None, 'is_data_reduction_enabled': False, 'is_replication_destination': False, 'is_snap_schedule_paused': False, 'is_thin_clone': False, 'is_thin_enabled': False, 'metadata_size': 4294967296, 'metadata_size_allocated': 4026531840, 'name': 'VSI-UNITY-test-task', 'per_tier_size_used': [111400714240, 0, 0], 'pool': {'UnityPool': {'hash': 8774260811427}}, 'size_allocated': 107374182400, 'size_total': 107374182400, 'size_used': None, 'snap_count': 0, 'snap_schedule': None, 'snap_wwn': '60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97', 'snaps_size': 0, 'snaps_size_allocated': 0, 'storage_resource': {'UnityStorageResource': {'hash': 8774267822228}}, 'tiering_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'type': 'LUNTypeEnum.VMWARE_ISCSI', 'wwn': '60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2'}])
+ Details of the Volumes.
+
+
+ id (, str, )
+ The ID of the Volume.
+
+
+ name (, str, )
+ The name of the Volume.
+
+
+
+Snapshot_Schedules (When Snapshot Schedules exist., list, [{'existed': True, 'hash': 8775599492651, 'id': 'snapSch_1', 'is_default': True, 'is_modified': None, 'is_sync_replicated': False, 'luns': None, 'modification_time': '2021-08-18 19:10:33.774000+00:00', 'name': 'CEM_DEFAULT_SCHEDULE_DEFAULT_PROTECTION', 'rules': {'UnitySnapScheduleRuleList': [{'UnitySnapScheduleRule': {'hash': 8775599498593}}]}, 'storage_resources': {'UnityStorageResourceList': [{'UnityStorageResource': {'hash': 8775599711597, 'id': 'res_88'}}, {'UnityStorageResource': {'hash': 8775599711528, 'id': 'res_3099'}}]}, 'version': 'ScheduleVersionEnum.LEGACY'}])
+ Details of the Snapshot Schedules.
+
+
+ id (, str, )
+ The ID of the Snapshot Schedule.
+
+
+ name (, str, )
+ The name of the Snapshot Schedule.
+
+
+
+NAS_Servers (When NAS Servers exist., list, [{'allow_unmapped_user': None, 'cifs_server': None, 'current_sp': {'UnityStorageProcessor': {'hash': 8747629920422, 'id': 'spb'}}, 'current_unix_directory_service': 'NasServerUnixDirectoryServiceEnum.NONE', 'default_unix_user': None, 'default_windows_user': None, 'existed': True, 'file_dns_server': None, 'file_interface': {'UnityFileInterfaceList': [{'UnityFileInterface': {'hash': 8747626606870, 'id': 'if_6'}}]}, 'filesystems': {'UnityFileSystemList': [{'UnityFileSystem': {'hash': 8747625901355, 'id': 'fs_6892'}}]}, 'hash': 8747625900370, 'health': {'UnityHealth': {'hash': 8747625900493}}, 'home_sp': {'UnityStorageProcessor': {'hash': 8747625877420, 'id': 'spb'}}, 'id': 'nas_1', 'is_backup_only': False, 'is_multi_protocol_enabled': False, 'is_packet_reflect_enabled': False, 'is_replication_destination': False, 'is_replication_enabled': False, 'is_windows_to_unix_username_mapping_enabled': None, 'name': 'lglad072', 'pool': {'UnityPool': {'hash': 8747629920479, 'id': 'pool_3'}}, 'preferred_interface_settings': {'UnityPreferredInterfaceSettings': {'hash': 8747626625166, 'id': 'preferred_if_1'}}, 'replication_type': 'ReplicationTypeEnum.NONE', 'size_allocated': 2952790016, 'tenant': None, 'virus_checker': {'UnityVirusChecker': {'hash': 8747626604144, 'id': 'cava_1'}}}])
+ Details of the NAS Servers.
+
+
+ id (, str, )
+ The ID of the NAS Server.
+
+
+ name (, str, )
+ The name of the NAS Server.
+
+
+
+File_Systems (When File Systems exist., list, [{'access_policy': 'AccessPolicyEnum.UNIX', 'cifs_notify_on_change_dir_depth': 512, 'cifs_share': None, 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'description': '', 'existed': True, 'folder_rename_policy': 'FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN', 'format': 'FSFormatEnum.UFS64', 'hash': 8786518053735, 'health': {'UnityHealth': {'hash': 8786518049091}}, 'host_io_size': 'HostIOSizeEnum.GENERAL_8K', 'id': 'fs_12', 'is_advanced_dedup_enabled': False, 'is_cifs_notify_on_access_enabled': False, 'is_cifs_notify_on_write_enabled': False, 'is_cifs_op_locks_enabled': True, 'is_cifs_sync_writes_enabled': False, 'is_data_reduction_enabled': False, 'is_read_only': False, 'is_smbca': False, 'is_thin_enabled': True, 'locking_policy': 'FSLockingPolicyEnum.MANDATORY', 'metadata_size': 4294967296, 'metadata_size_allocated': 3758096384, 'min_size_allocated': 0, 'name': 'vro-daniel-test', 'nas_server': {'UnityNasServer': {'hash': 8786517296113, 'id': 'nas_1'}}, 'nfs_share': None, 'per_tier_size_used': [6442450944, 0, 0], 'pool': {'UnityPool': {'hash': 8786518259493, 'id': 'pool_3'}}, 'pool_full_policy': 'ResourcePoolFullPolicyEnum.FAIL_WRITES', 'size_allocated': 283148288, 'size_allocated_total': 4041244672, 'size_preallocated': 2401206272, 'size_total': 107374182400, 'size_used': 1620312064, 'snap_count': 0, 'snaps_size': 0, 'snaps_size_allocated': 0, 'storage_resource': {'UnityStorageResource': {'hash': 8786518044167, 'id': 'res_20'}}, 'supported_protocols': 'FSSupportedProtocolEnum.NFS', 'tiering_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'type': 'FilesystemTypeEnum.FILESYSTEM'}])
+ Details of the File Systems.
+
+
+ id (, str, )
+ The ID of the File System.
+
+
+ name (, str, )
+ The name of the File System.
+
+
+
+Snapshots (When Snapshots exist., list, [{'access_type': 'FilesystemSnapAccessTypeEnum.CHECKPOINT', 'attached_wwn': None, 'creation_time': '2022-04-06 11:19:26.818000+00:00', 'creator_schedule': None, 'creator_type': 'SnapCreatorTypeEnum.REP_V2', 'creator_user': None, 'description': '', 'existed': True, 'expiration_time': None, 'hash': 8739100256648, 'host_access': None, 'id': '38654716464', 'io_limit_policy': None, 'is_auto_delete': False, 'is_modifiable': False, 'is_modified': False, 'is_read_only': True, 'is_system_snap': True, 'last_writable_time': None, 'lun': {'UnityLun': {'hash': 8739100148962, 'id': 'sv_301'}}, 'name': '42949677504_APM00213404195_0000.ckpt000_9508038064690266.2_238', 'parent_snap': None, 'size': 3221225472, 'snap_group': None, 'state': 'SnapStateEnum.READY', 'storage_resource': {'UnityStorageResource': {'hash': 8739100173002, 'id': 'sv_301'}}}])
+ Details of the Snapshots.
+
+
+ id (, str, )
+ The ID of the Snapshot.
+
+
+ name (, str, )
+ The name of the Snapshot.
+
+
+
+NFS_Exports (When NFS Exports exist., list, [{'anonymous_gid': 4294967294, 'anonymous_uid': 4294967294, 'creation_time': '2021-12-01 06:21:48.381000+00:00', 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS', 'description': '', 'existed': True, 'export_option': 1, 'export_paths': ['10.230.24.20:/zack_nfs_01'], 'filesystem': {'UnityFileSystem': {'hash': 8747298565566, 'id': 'fs_67'}}, 'hash': 8747298565548, 'host_accesses': None, 'id': 'NFSShare_29', 'is_read_only': None, 'min_security': 'NFSShareSecurityEnum.SYS', 'modification_time': '2022-04-01 11:44:17.553000+00:00', 'name': 'zack_nfs_01', 'nfs_owner_username': None, 'no_access_hosts': None, 'no_access_hosts_string': '10.226.198.207,10.226.198.25,10.226.198.44,10.226.198.85,Host1, Host2,Host4,Host5,Host6,10.10.0.0/255.255.240.0', 'path': '/', 'read_only_hosts': None, 'read_only_hosts_string': '', 'read_only_root_access_hosts': None, 'read_only_root_hosts_string': '', 'read_write_hosts': None, 'read_write_hosts_string': '', 'read_write_root_hosts_string': '', 'role': 'NFSShareRoleEnum.PRODUCTION', 'root_access_hosts': None, 'snap': None, 'type': 'NFSTypeEnum.NFS_SHARE'}])
+ Details of the NFS Exports.
+
+
+ id (, str, )
+ The ID of the NFS Export.
+
+
+ name (, str, )
+ The name of the NFS Export.
+
+
+
+SMB_Shares (When SMB Shares exist., list, [{'creation_time': '2022-03-17 11:56:54.867000+00:00', 'description': '', 'existed': True, 'export_paths': ['\\\\multi-prot-pie.extreme1.com\\multi-prot-hui', '\\\\10.230.24.26\\multi-prot-hui'], 'filesystem': {'UnityFileSystem': {'hash': 8741295638110, 'id': 'fs_140'}}, 'hash': 8741295638227, 'id': 'SMBShare_20', 'is_abe_enabled': False, 'is_ace_enabled': False, 'is_branch_cache_enabled': False, 'is_continuous_availability_enabled': False, 'is_dfs_enabled': False, 'is_encryption_enabled': False, 'is_read_only': None, 'modified_time': '2022-03-17 11:56:54.867000+00:00', 'name': 'multi-prot-hui', 'offline_availability': 'CifsShareOfflineAvailabilityEnum.NONE', 'path': '/', 'snap': None, 'type': 'CIFSTypeEnum.CIFS_SHARE', 'umask': '022'}])
+ Details of the SMB Shares.
+
+
+ id (, str, )
+ The ID of the SMB Share.
+
+
+ name (, str, )
+ The name of the SMB Share.
+
+
+
+User_Quotas (When user quotas exist., list, [{'id': 'userquota_171798694698_0_60000', 'uid': 60000}, {'id': 'userquota_171798694939_0_5001', 'uid': 5001}])
+ Details of the user quotas.
+
+
+ id (, str, )
+ The ID of the user quota.
+
+
+ uid (, str, )
+ The UID of the user quota.
+
+
+
+Tree_Quotas (When quota trees exist., list, [{'id': 'treequota_171798709589_1', 'path': '/vro-ui-fs-rkKfimmN'}, {'id': 'treequota_171798709590_1', 'path': '/vro-ui-fs-mGYXAMqk'}])
+ Details of the quota trees.
+
+
+ id (, str, )
+ The ID of the quota tree.
+
+
+ path (, str, )
+ The path of the quota tree.
+
+
+
+Disk_Groups (When disk groups exist., list, [{'id': 'dg_3', 'name': '400 GB SAS Flash 2', 'tier_type': 'EXTREME_PERFORMANCE'}, {'id': 'dg_16', 'name': '600 GB SAS 10K', 'tier_type': 'PERFORMANCE'}])
+ Details of the disk groups.
+
+
+ id (, str, )
+ The ID of the disk group.
+
+
+ name (, str, )
+ The name of the disk group.
+
+
+ tier_type (, str, )
+ The tier type of the disk group.
+
+
+
+NFS_Servers (When NFS Servers exist., list, [{'id': 'nfs_3'}, {'id': 'nfs_4'}, {'id': 'nfs_9'}])
+ Details of the NFS Servers.
+
+
+ id (, str, )
+ The ID of the NFS server.
+
+
+
+CIFS_Servers (When CIFS Servers exist., list, [{'id': 'cifs_3', 'name': 'test_cifs_1'}, {'id': 'cifs_4', 'name': 'test_cifs_2'}, {'id': 'cifs_9', 'name': 'test_cifs_3'}])
+ Details of the CIFS Servers.
+
+
+ id (, str, )
+ The ID of the CIFS server.
+
+
+ name (, str, )
+ The name of the CIFS server.
+
+
+
+Ethernet_ports (When ethernet ports exist., list, [{'id': 'spa_mgmt', 'name': 'SP A Management Port'}, {'id': 'spa_ocp_0_eth0', 'name': 'SP A 4-Port Card Ethernet Port 0'}, {'id': 'spa_ocp_0_eth1', 'name': 'SP A 4-Port Card Ethernet Port 1'}])
+ Details of the ethernet ports.
+
+
+ id (, str, )
+ The ID of the ethernet port.
+
+
+ name (, str, )
+ The name of the ethernet port.
+
+
+
+ File_interfaces (When file interfaces exist., list, [{'id': 'if_3', 'ip_address': 'xx.xx.xx.xx', 'name': '1_APMXXXXXXXXXX'}, {'id': 'if_3', 'ip_address': 'xx.xx.xx.xx', 'name': '2_APMXXXXXXXXXX'}, {'id': 'if_3', 'ip_address': 'xx.xx.xx.xx', 'name': '3_APMXXXXXXXXXX'}])
+ Details of the file interfaces.
+
+
+ id (, str, )
+ The ID of the file interface.
+
+
+ name (, str, )
+ The name of the file interface.
+
+
+ ip_address (, str, )
+ IP address of the file interface.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/interface.rst b/ansible_collections/dellemc/unity/docs/modules/interface.rst
new file mode 100644
index 000000000..aad1c02e8
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/interface.rst
@@ -0,0 +1,254 @@
+.. _interface_module:
+
+
+interface -- Manage Interfaces on Unity storage system
+======================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+ Managing the interfaces on the Unity storage system includes adding interfaces to a NAS server, getting details of an interface, and deleting configured interfaces.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ nas_server_name (optional, str, None)
+ Name of the NAS server for which interface will be configured.
+
+
+ nas_server_id (optional, str, None)
+ ID of the NAS server for which interface will be configured.
+
+
+ ethernet_port_name (optional, str, None)
+ Name of the ethernet port.
+
+
+ ethernet_port_id (optional, str, None)
+ ID of the ethernet port.
+
+
+ role (optional, str, None)
+ Indicates whether interface is configured as production or backup.
+
+
+ interface_ip (True, str, None)
+ IP of network interface.
+
+
+ netmask (optional, str, None)
+ Netmask of network interface.
+
+
+ prefix_length (optional, int, None)
+ Prefix length is mutually exclusive with *netmask*.
+
+
+ gateway (optional, str, None)
+ Gateway of network interface.
+
+
+ vlan_id (optional, int, None)
+ VLAN ID of the interface.
+
+
+ state (True, str, None)
+ Define whether the interface should exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - The modify operation for an interface is not supported.
+ - The modules present in this collection named 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+
+ - name: Add Interface as Backup to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "BACKUP"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Add Interface as Production to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "PRODUCTION"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Get interface details
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "present"
+
+ - name: Delete Interface
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "absent"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+interface_details (When interface is configured for NAS Server., dict, {'existed': True, 'gateway': 'xx.xx.xx.xx', 'hash': 8785300560421, 'health': {'UnityHealth': {'hash': 8785300565468}}, 'id': 'if_69', 'ip_address': '10.10.10.10', 'ip_port': {'UnityIpPort': {'hash': 8785300565300, 'id': 'spb_ocp_0_eth0'}}, 'ip_protocol_version': 'IpProtocolVersionEnum.IPv4', 'is_disabled': False, 'is_preferred': True, 'mac_address': '0C:48:C6:9F:57:BF', 'name': '36_APM00213404194', 'nas_server': {'UnityNasServer': {'hash': 8785300565417, 'id': 'nas_10'}}, 'netmask': '10.10.10.10', 'replication_policy': None, 'role': 'FileInterfaceRoleEnum.PRODUCTION', 'source_parameters': None, 'v6_prefix_length': None, 'vlan_id': 324})
+ Details of the interface.
+
+
+ existed (, bool, )
+ Indicates if interface exists.
+
+
+ gateway (, str, )
+ Gateway of network interface.
+
+
+ id (, str, )
+ Unique identifier of the interface.
+
+
+ ip_address (, str, )
+ IP address of interface.
+
+
+ ip_port (, dict, )
+ Port on which network interface is configured.
+
+
+ id (, str, )
+ ID of ip_port.
+
+
+
+ ip_protocol_version (, str, )
+ IP protocol version.
+
+
+ is_disabled (, bool, )
+ Indicates whether interface is disabled.
+
+
+ is_preferred (, bool, )
+ Indicates whether interface is preferred.
+
+
+ mac_address (, str, )
+ MAC address of the ip_port.
+
+
+ name (, str, )
+ System-configured name of the interface.
+
+
+ nas_server (, dict, )
+ Details of NAS server where interface is configured.
+
+
+ id (, str, )
+ ID of NAS Server.
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/nasserver.rst b/ansible_collections/dellemc/unity/docs/modules/nasserver.rst
new file mode 100644
index 000000000..284f37326
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/nasserver.rst
@@ -0,0 +1,468 @@
+.. _nasserver_module:
+
+
+nasserver -- Manage NAS servers on Unity storage system
+=======================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing NAS servers on a Unity storage system includes getting details of NAS servers and modifying NAS server attributes.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ nas_server_id (optional, str, None)
+ The ID of the NAS server.
+
+ Either *nas_server_name* or *nas_server_id* is required to perform the task.
+
+ The parameters *nas_server_name* and *nas_server_id* are mutually exclusive.
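+
+ A details lookup that uses this ID is sketched after the shipped examples below.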
+
+
+ nas_server_name (optional, str, None)
+ The Name of the NAS server.
+
+ Either *nas_server_name* or *nas_server_id* is required to perform the task.
+
+ The parameters *nas_server_name* and *nas_server_id* are mutually exclusive.
+
+
+ nas_server_new_name (optional, str, None)
+ The new name of the NAS server.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ is_replication_destination (optional, bool, None)
+ It specifies whether the NAS server is a replication destination.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ is_backup_only (optional, bool, None)
+ It specifies whether the NAS server is used as backup only.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ is_multiprotocol_enabled (optional, bool, None)
+ This parameter indicates whether multiprotocol sharing mode is enabled.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ allow_unmapped_user (optional, bool, None)
+ This flag determines whether access is allowed or denied when a user mapping failure occurs.
+
+ If ``true``, then enable access in case of any user mapping failure.
+
+ If ``false``, then disable access in case of any user mapping failure.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ default_windows_user (optional, str, None)
+ Default windows user name used for granting access in the case of Unix to Windows user mapping failure.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ default_unix_user (optional, str, None)
+ Default Unix user name used for granting access in the case of Windows to Unix user mapping failure.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ enable_windows_to_unix_username_mapping (optional, bool, None)
+ This parameter indicates whether a Unix to/from Windows user name mapping is enabled.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ is_packet_reflect_enabled (optional, bool, None)
+ Set this parameter to ``true`` to enable packet reflect.
+
+ It can be mentioned during modification of the NAS server.
+
+
+ current_unix_directory_service (optional, str, None)
+ This is the directory service used for querying identity information for UNIX (such as UIDs, GIDs, net groups).
+
+ It can be mentioned during modification of the NAS server.
+
+
+ replication_params (optional, dict, None)
+ Settings required for enabling replication.
+
+
+ destination_nas_server_name (optional, str, None)
+ Name of the destination NAS server.
+
+ The default value is the source NAS server name prefixed with 'DR_'.
+
+
+ replication_mode (optional, str, None)
+ The replication mode.
+
+ This is mandatory to enable replication.
+
+
+ rpo (optional, int, None)
+ Maximum time to wait before the system syncs the source and destination NAS servers.
+
+ The *rpo* option should be specified if the *replication_mode* is ``asynchronous``.
+
+ The value should be in range of ``5`` to ``1440``.
+
+
+ replication_type (optional, str, None)
+ Type of replication.
+
+
+ remote_system (optional, dict, None)
+ Details of remote system to which the replication is being configured.
+
+ The *remote_system* option should be specified if the *replication_type* is ``remote``.
+
+
+ remote_system_host (True, str, None)
+ IP or FQDN of the remote Unity Unisphere host.
+
+
+ remote_system_username (True, str, None)
+ Username of the remote Unity Unisphere host.
+
+
+ remote_system_password (True, str, None)
+ Password of the remote Unity Unisphere host.
+
+
+ remote_system_verifycert (optional, bool, True)
+ Boolean variable to specify whether or not to validate the SSL certificate of the remote Unity Unisphere host.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ remote_system_port (optional, int, 443)
+ Port at which the remote Unity Unisphere is hosted.
+
+
+
+ destination_pool_name (optional, str, None)
+ Name of the pool used to allocate destination LUNs.
+
+ Mutually exclusive with *destination_pool_id*.
+
+
+ destination_pool_id (optional, str, None)
+ ID of the pool used to allocate destination LUNs.
+
+ Mutually exclusive with *destination_pool_name*.
+
+
+ destination_sp (optional, str, None)
+ Storage processor of the destination NAS server.
+
+
+ is_backup (optional, bool, None)
+ Indicates whether the destination NAS server is a backup.
+
+
+ replication_name (optional, str, None)
+ User-defined name for the replication session.
+
+
+ new_replication_name (optional, str, None)
+ New name for the replication session.
+
+
+
+ replication_state (optional, str, None)
+ State of the replication.
+
+
+ replication_reuse_resource (optional, bool, None)
+ This parameter indicates whether an existing NAS server is to be reused for replication.
+
+
+ state (True, str, None)
+ Define the state of the NAS server on the array.
+
+ The value ``present`` indicates that the NAS server should exist on the system after the task is executed.
+
+ In this release, deletion of a NAS server is not supported. Hence, if the state is set to ``absent`` for any existing NAS server, an error is thrown.
+
+ For any non-existing NAS server, if the state is set to ``absent``, it returns None.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+
+ - name: Get Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ state: "present"
+
+ - name: Modify Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ nas_server_new_name: "updated_sample_nas_server"
+ is_replication_destination: False
+ is_backup_only: False
+ is_multiprotocol_enabled: True
+ allow_unmapped_user: True
+ default_unix_user: "default_unix_sample_user"
+ default_windows_user: "default_windows_sample_user"
+ enable_windows_to_unix_username_mapping: True
+ current_unix_directory_service: "LDAP"
+ is_packet_reflect_enabled: True
+ state: "present"
+
+ - name: Enable replication for NAS Server on Local System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_id: "nas_10"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "local"
+ destination_pool_name: "Pool_Ansible_Neo_DND"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System in existing NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: True
+ replication_params:
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ replication_name: "test_replication"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication by specifying replication_name on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
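+
+ # Illustrative sketch, not one of the module's shipped examples: details can
+ # also be fetched by *nas_server_id* instead of the name; "nas_10" is a
+ # placeholder taken from the earlier replication example.
+ - name: Get Details of NAS Server using ID
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_id: "nas_10"
+ state: "present"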
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+nas_server_details (When NAS server exists., dict, {'allow_unmapped_user': None, 'cifs_server': {'UnityCifsServerList': [{'UnityCifsServer': {'hash': 8761756885270, 'id': 'cifs_34'}}]}, 'current_sp': {'UnityStorageProcessor': {'hash': 8761756885273, 'id': 'spb'}}, 'current_unix_directory_service': 'NasServerUnixDirectoryServiceEnum.NIS', 'default_unix_user': None, 'default_windows_user': None, 'existed': True, 'file_dns_server': {'UnityFileDnsServer': {'hash': 8761756885441, 'id': 'dns_12'}}, 'file_interface': {'UnityFileInterfaceList': [{'UnityFileInterface': {'hash': 8761756889908, 'id': 'if_37'}}]}, 'filesystems': None, 'hash': 8761757005084, 'health': {'UnityHealth': {'hash': 8761756867588}}, 'home_sp': {'UnityStorageProcessor': {'hash': 8761756867618, 'id': 'spb'}}, 'id': 'nas_10', 'is_backup_only': False, 'is_multi_protocol_enabled': False, 'is_packet_reflect_enabled': False, 'is_replication_destination': False, 'is_replication_enabled': True, 'is_windows_to_unix_username_mapping_enabled': None, 'name': 'dummy_nas', 'pool': {'UnityPool': {'hash': 8761756885360, 'id': 'pool_7'}}, 'preferred_interface_settings': {'UnityPreferredInterfaceSettings': {'hash': 8761756885438, 'id': 'preferred_if_10'}}, 'replication_type': 'ReplicationTypeEnum.REMOTE', 'size_allocated': 3489660928, 'tenant': None, 'virus_checker': {'UnityVirusChecker': {'hash': 8761756885426, 'id': 'cava_10'}}})
+ The NAS server details.
+
+
+ name (, str, )
+ Name of the NAS server.
+
+
+ id (, str, )
+ ID of the NAS server.
+
+
+ allow_unmapped_user (, bool, )
+ Indicates whether access is allowed in case of any user mapping failure.
+
+
+ current_unix_directory_service (, str, )
+ Directory service used for querying identity information for UNIX (such as UIDs, GIDs, net groups).
+
+
+ default_unix_user (, str, )
+ Default Unix user name used for granting access in the case of Windows to Unix user mapping failure.
+
+
+ default_windows_user (, str, )
+ Default windows user name used for granting access in the case of Unix to Windows user mapping failure.
+
+
+ is_backup_only (, bool, )
+ Whether the NAS server is used as backup only.
+
+
+ is_multi_protocol_enabled (, bool, )
+ Indicates whether multiprotocol sharing mode is enabled.
+
+
+ is_packet_reflect_enabled (, bool, )
+ Indicates whether packet reflect is enabled.
+
+
+ is_replication_destination (, bool, )
+ ``true`` if the NAS server is a replication destination.
+
+
+ is_windows_to_unix_username_mapping_enabled (, bool, )
+ Indicates whether a Unix to/from Windows user name mapping is enabled.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/nfs.rst b/ansible_collections/dellemc/unity/docs/modules/nfs.rst
new file mode 100644
index 000000000..cce2058f5
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/nfs.rst
@@ -0,0 +1,626 @@
+.. _nfs_module:
+
+
+nfs -- Manage NFS export on Unity storage system
+================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing NFS exports on a Unity storage system includes creating a new NFS export, modifying NFS export attributes, displaying NFS export details, and deleting an NFS export.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ nfs_export_name (optional, str, None)
+ Name of the NFS export.
+
+ Mandatory for create operation.
+
+ Specify either *nfs_export_name* or *nfs_export_id* (but not both) for any operation.
+
+
+ nfs_export_id (optional, str, None)
+ ID of the NFS export.
+
+ This is a unique ID generated by Unity storage system.
+
+
+ filesystem_name (optional, str, None)
+ Name of the filesystem for which NFS export will be created.
+
+ Either filesystem or snapshot is required for creation of the NFS export.
+
+ If *filesystem_name* is specified, then *nas_server* is required to uniquely identify the filesystem.
+
+ If filesystem parameter is provided, then snapshot cannot be specified.
+
+
+ filesystem_id (optional, str, None)
+ ID of the filesystem.
+
+ This is a unique ID generated by Unity storage system.
+
+
+ snapshot_name (optional, str, None)
+ Name of the snapshot for which NFS export will be created.
+
+ Either filesystem or snapshot is required for creation of the NFS export.
+
+ If snapshot parameter is provided, then filesystem cannot be specified.
+
+
+ snapshot_id (optional, str, None)
+ ID of the snapshot.
+
+ This is a unique ID generated by Unity storage system.
+
+
+ nas_server_name (optional, str, None)
+ Name of the NAS server on which filesystem will be hosted.
+
+
+ nas_server_id (optional, str, None)
+ ID of the NAS server on which filesystem will be hosted.
+
+
+ path (optional, str, None)
+ Local path to export relative to the NAS server root.
+
+ With NFS, each export of a file_system or file_snap must have a unique local path.
+
+ Mandatory while creating NFS export.
+
+
+ description (optional, str, None)
+ Description of the NFS export.
+
+ Optional parameter when creating an NFS export.
+
+ To modify the description, pass the new value in the *description* field.
+
+ To remove the description, pass an empty value in the *description* field.
+
+
+ host_state (optional, str, None)
+ Define whether the hosts can access the NFS export.
+
+ Required when adding or removing access of hosts from the export.
+
+
+ anonymous_uid (optional, int, None)
+ Specifies the user ID of the anonymous account.
+
+ If not specified at the time of creation, it will be set to 4294967294.
+
+
+ anonymous_gid (optional, int, None)
+ Specifies the group ID of the anonymous account.
+
+ If not specified at the time of creation, it will be set to 4294967294.
+
+
+ state (True, str, None)
+ State variable to determine whether NFS export will exist or not.
+
+
+ default_access (optional, str, None)
+ Default access level for all hosts that can access the NFS export.
+
+ Hosts that need access different from the default can be configured by adding them to the appropriate host list.
+
+ If *default_access* is not mentioned during creation, then NFS export will be created with ``NO_ACCESS``.
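+
+ A creation task that sets this option explicitly is sketched at the end of the Examples section.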
+
+
+ min_security (optional, str, None)
+ NFS enforced security type for users accessing an NFS export.
+
+ If not specified at the time of creation, it will be set to ``SYS``.
+
+
+ adv_host_mgmt_enabled (optional, bool, None)
+ If ``false``, allows you to specify hosts without first having to register them.
+
+ Mandatory while adding access hosts.
+
+
+ no_access_hosts (optional, list, None)
+ Hosts with no access to the NFS export.
+
+ List of dictionaries. Each dictionary will have any of the keys from *host_name*, *host_id*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``true`` then the accepted keys are *host_name*, *host_id* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``false`` then the accepted keys are *host_name*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ ip_address (optional, str, None)
+ IP address of the host.
+
+
+ subnet (optional, str, None)
+ Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+
+
+ netgroup (optional, str, None)
+ Netgroup that is defined in NIS or the local netgroup file.
+
+
+ domain (optional, str, None)
+ DNS domain, where all NFS clients in the domain are included in the host list.
+
+
+
+ read_only_hosts (optional, list, None)
+ Hosts with read-only access to the NFS export.
+
+ List of dictionaries. Each dictionary will have any of the keys from *host_name*, *host_id*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``true`` then the accepted keys are *host_name*, *host_id* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``false`` then the accepted keys are *host_name*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ ip_address (optional, str, None)
+ IP address of the host.
+
+
+ subnet (optional, str, None)
+ Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+
+
+ netgroup (optional, str, None)
+ Netgroup that is defined in NIS or the local netgroup file.
+
+
+ domain (optional, str, None)
+ DNS domain, where all NFS clients in the domain are included in the host list.
+
+
+
+ read_only_root_hosts (optional, list, None)
+ Hosts with read-only for root user access to the NFS export.
+
+ List of dictionaries. Each dictionary will have any of the keys from *host_name*, *host_id*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``true`` then the accepted keys are *host_name*, *host_id* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``false`` then the accepted keys are *host_name*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ ip_address (optional, str, None)
+ IP address of the host.
+
+
+ subnet (optional, str, None)
+ Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+
+
+ netgroup (optional, str, None)
+ Netgroup that is defined in NIS or the local netgroup file.
+
+
+ domain (optional, str, None)
+ DNS domain, where all NFS clients in the domain are included in the host list.
+
+
+
+ read_write_hosts (optional, list, None)
+ Hosts with read and write access to the NFS export.
+
+ List of dictionaries. Each dictionary will have any of the keys from *host_name*, *host_id*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``true`` then the accepted keys are *host_name*, *host_id* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``false`` then the accepted keys are *host_name*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ ip_address (optional, str, None)
+ IP address of the host.
+
+
+ subnet (optional, str, None)
+ Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+
+
+ netgroup (optional, str, None)
+ Netgroup that is defined in NIS or the local netgroup file.
+
+
+ domain (optional, str, None)
+ DNS domain, where all NFS clients in the domain are included in the host list.
+
+
+
+ read_write_root_hosts (optional, list, None)
+ Hosts with read and write for root user access to the NFS export.
+
+ List of dictionaries. Each dictionary will have any of the keys from *host_name*, *host_id*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``true`` then the accepted keys are *host_name*, *host_id* and *ip_address*.
+
+ If *adv_host_mgmt_enabled* is ``false`` then the accepted keys are *host_name*, *subnet*, *netgroup*, *domain* and *ip_address*.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ ip_address (optional, str, None)
+ IP address of the host.
+
+
+ subnet (optional, str, None)
+ Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+
+
+ netgroup (optional, str, None)
+ Netgroup that is defined in NIS or the local netgroup file.
+
+
+ domain (optional, str, None)
+ DNS domain, where all NFS clients in the domain are included in the host list.
+
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create nfs export from filesystem
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ path: '/'
+ filesystem_id: "fs_377"
+ state: "present"
+
+ - name: Create nfs export from snapshot
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_snap"
+ path: '/'
+ snapshot_name: "ansible_fs_snap"
+ state: "present"
+
+ - name: Modify nfs export
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ nas_server_id: "nas_3"
+ description: ""
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: 4294967290
+ anonymous_uid: 4294967290
+ state: "present"
+
+ - name: Add host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "present-in-export"
+ state: "present"
+
+ - name: Remove host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "absent-in-export"
+ state: "present"
+
+ - name: Add host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "present-in-export"
+ state: "present"
+
+ - name: Remove host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "absent-in-export"
+ state: "present"
+
+ - name: Get nfs details
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_id: "NFSShare_291"
+ state: "present"
+
+ - name: Delete nfs export by nfs name
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_name"
+ nas_server_name: "ansible_nas_name"
+ state: "absent"
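+
+ # Illustrative sketch, not one of the module's shipped examples: an export
+ # created with *default_access* and *min_security* set explicitly, instead
+ # of relying on the NO_ACCESS and SYS defaults. Values are placeholders.
+ - name: Create nfs export with explicit default access and minimal security
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_explicit_access"
+ path: '/'
+ filesystem_id: "fs_377"
+ default_access: "READ_ONLY"
+ min_security: "SYS"
+ state: "present"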
+
+
+
+Return Values
+-------------
+
+changed (always, bool, false)
+ Whether or not the resource has changed.
+
+
+nfs_share_details (When nfs export exists., dict, {'anonymous_gid': 4294967294, 'anonymous_uid': 4294967294, 'creation_time': '2022-03-09 15:05:34.720000+00:00', 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS', 'description': '', 'export_option': 1, 'export_paths': ['**.***.**.**:/dummy-share-123'], 'filesystem': {'UnityFileSystem': {'id': 'fs_id_1', 'name': 'fs_name_1'}}, 'host_accesses': 'None', 'id': 'NFSShare_14393', 'is_read_only': 'None', 'min_security': 'NFSShareSecurityEnum.SYS', 'modification_time': '2022-04-25 08:12:28.179000+00:00', 'name': 'dummy-share-123', 'nfs_owner_username': 'None', 'no_access_hosts': 'None', 'no_access_hosts_string': 'host1,**.***.*.*', 'path': '/', 'read_only_hosts': 'None', 'read_only_hosts_string': '', 'read_only_root_access_hosts': 'None', 'read_only_root_hosts_string': '', 'read_write_hosts': 'None', 'read_write_hosts_string': '', 'read_write_root_hosts_string': '', 'role': 'NFSShareRoleEnum.PRODUCTION', 'root_access_hosts': 'None', 'snap': 'None', 'type': 'NFSTypeEnum.NFS_SHARE', 'existed': True, 'nas_server': {'UnityNasServer': {'id': 'nas_id_1', 'name': 'dummy_nas_server'}}})
+ Details of the NFS export.
+
+
+ anonymous_uid (, int, )
+ User ID of the anonymous account.
+
+
+ anonymous_gid (, int, )
+ Group ID of the anonymous account.
+
+
+ default_access (, str, )
+ Default access level for all hosts that can access the export.
+
+
+ description (, str, )
+ Description about the NFS export.
+
+
+ id (, str, )
+ ID of the NFS export.
+
+
+ min_security (, str, )
+ NFS enforced security type for users accessing an export.
+
+
+ name (, str, )
+ Name of the NFS export.
+
+
+ no_access_hosts_string (, str, )
+ Hosts with no access to the NFS export.
+
+
+ read_only_hosts_string (, str, )
+ Hosts with read-only access to the NFS export.
+
+
+ read_only_root_hosts_string (, str, )
+ Hosts with read-only root user access to the NFS export.
+
+
+ read_write_hosts_string (, str, )
+ Hosts with read and write access to the NFS export.
+
+
+ read_write_root_hosts_string (, str, )
+ Hosts with read and write root user access to the export.
+
+
+ type (, str, )
+ NFS export type, that is, filesystem or snapshot.
+
+
+ export_paths (, list, )
+ Export paths that can be used to mount and access the export.
+
+
+ filesystem (, dict, )
+ Details of the filesystem on which the NFS export is present.
+
+
+ UnityFileSystem (, dict, )
+ Filesystem details.
+
+
+ id (, str, )
+ ID of the filesystem.
+
+
+ name (, str, )
+ Name of the filesystem.
+
+
+
+
+ nas_server (, dict, )
+ Details of the NAS server.
+
+
+ UnityNasServer (, dict, )
+ NAS server details.
+
+
+ id (, str, )
+ ID of the NAS server.
+
+
+ name (, str, )
+ Name of the NAS server.
+
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Vivek Soni (@v-soni11) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst b/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst
new file mode 100644
index 000000000..0836bb63c
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst
@@ -0,0 +1,242 @@
+.. _nfsserver_module:
+
+
+nfsserver -- Manage NFS server on Unity storage system
+======================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing the NFS server on the Unity storage system includes creating an NFS server, getting NFS server details, and deleting an NFS server.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ nas_server_name (optional, str, None)
+ Name of the NAS server on which NFS server will be hosted.
+
+
+ nas_server_id (optional, str, None)
+ ID of the NAS server on which NFS server will be hosted.
+
+
+ nfs_server_id (optional, str, None)
+ ID of the NFS server.
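+
+ A details lookup driven by this ID is sketched at the end of the Examples section.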
+
+
+ host_name (optional, str, None)
+ Host name of the NFS server.
+
+
+ nfs_v4_enabled (optional, bool, None)
+ Indicates whether the NFSv4 is enabled on the NAS server.
+
+
+ is_secure_enabled (optional, bool, None)
+ Indicates whether the secure NFS is enabled.
+
+
+ kerberos_domain_controller_type (optional, str, None)
+ Type of Kerberos Domain Controller used for secure NFS service.
+
+
+ kerberos_domain_controller_username (optional, str, None)
+ Kerberos Domain Controller administrator username.
+
+
+ kerberos_domain_controller_password (optional, str, None)
+ Kerberos Domain Controller administrator password.
+
+
+ is_extended_credentials_enabled (optional, bool, None)
+ Indicates whether support for more than 16 Unix groups in a Unix credential is enabled.
+
+
+ remove_spn_from_kerberos (optional, bool, True)
+ Indicates whether to remove the SPN from Kerberos Domain Controller.
+
+
+ state (True, str, None)
+ Define whether the NFS server should exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - Modify operation for NFS Server is not supported.
+ - When *kerberos_domain_controller_type* is ``UNIX``, *kdc_type* in *nfs_server_details* output is displayed as ``null``.
+ - The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+
+ - name: Create NFS server with kdctype as Windows
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "WINDOWS"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Create NFS server with kdctype as Unix
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "UNIX"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Get NFS server details
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ state: "present"
+
+ - name: Delete NFS server
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ remove_spn_from_kerberos: False
+ state: "absent"
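+
+ # Illustrative sketch, not one of the module's shipped examples: details can
+ # also be fetched by *nfs_server_id* instead of the NAS server name; "nfs_51"
+ # is a placeholder taken from the sample return value.
+ - name: Get NFS server details using ID
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_server_id: "nfs_51"
+ state: "present"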
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+nfs_server_details (When NFS server exists, dict, {'credentials_cache_ttl': '0:15:00', 'existed': True, 'file_interfaces': {'UnityFileInterfaceList': [{'UnityFileInterface': {'hash': 8778980109421, 'id': 'if_37'}}]}, 'hash': 8778980109388, 'host_name': 'dummy_nas23.pie.lab.emc.com', 'id': 'nfs_51', 'is_extended_credentials_enabled': True, 'is_secure_enabled': True, 'kdc_type': 'KdcTypeEnum.WINDOWS', 'nas_server': {'UnityNasServer': {'hash': 8778980109412}}, 'nfs_v4_enabled': True, 'servicee_principal_name': None})
+ Details of the NFS server.
+
+
+ credentials_cache_ttl (, str, )
+ Credential cache refresh timeout. Resolution is in minutes. Default value is 15 minutes.
+
+
+ existed (, bool, )
+ Indicates if NFS Server exists.
+
+
+ host_name (, str, )
+ Host name of the NFS server.
+
+
+ id (, str, )
+ Unique identifier of the NFS Server instance.
+
+
+ is_extended_credentials_enabled (, bool, )
+ Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential.
+
+
+ is_secure_enabled (, bool, )
+ Indicates whether secure NFS is enabled on the NFS server.
+
+
+ kdc_type (, str, )
+ Type of Kerberos Domain Controller used for secure NFS service.
+
+
+ nfs_v4_enabled (, bool, )
+ Indicates whether NFSv4 is enabled on the NAS server.
+
+
+ servicee_principal_name (, str, )
+ The Service Principal Name (SPN) for the NFS Server.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/smbshare.rst b/ansible_collections/dellemc/unity/docs/modules/smbshare.rst
new file mode 100644
index 000000000..697bda3ff
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/smbshare.rst
@@ -0,0 +1,381 @@
+.. _smbshare_module:
+
+
+smbshare -- Manage SMB shares on Unity storage system
+=====================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing SMB shares on a Unity storage system includes creating, getting, modifying, and deleting SMB shares.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ share_name (optional, str, None)
+ Name of the SMB share.
+
+ Required during creation of the SMB share.
+
+ For all other operations either *share_name* or *share_id* is required.
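+
+ A details lookup by share name is sketched at the end of the Examples section.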
+
+
+ share_id (optional, str, None)
+ ID of the SMB share.
+
+ Should not be specified during creation. The ID is auto-generated.
+
+ For all other operations either *share_name* or *share_id* is required.
+
+ If *share_id* is used, there is no need to pass nas_server/filesystem/snapshot/path.
+
+
+ path (optional, str, None)
+ Local path to the file system/Snapshot or any existing sub-folder of the file system/Snapshot that is shared over the network.
+
+ Path is relative to the root of the filesystem.
+
+ Required for creation of the SMB share.
+
+
+ filesystem_id (optional, str, None)
+ The ID of the File System.
+
+ Either *filesystem_name* or *filesystem_id* is required for creation of the SMB share for filesystem.
+
+ If *filesystem_name* is specified, then *nas_server_name*/*nas_server_id* is required to uniquely identify the filesystem.
+
+ Options *filesystem_name* and *filesystem_id* are mutually exclusive parameters.
+
+
+ snapshot_id (optional, str, None)
+ The ID of the Filesystem Snapshot.
+
+ Either *snapshot_name* or *snapshot_id* is required for creation of the SMB share for a snapshot.
+
+ If *snapshot_name* is specified, then *nas_server_name*/*nas_server_id* is required to uniquely identify the snapshot.
+
+ Options *snapshot_name* and *snapshot_id* are mutually exclusive parameters.
+
+
+ nas_server_id (optional, str, None)
+ The ID of the NAS Server.
+
+ It is not required if *share_id* is used.
+
+
+ filesystem_name (optional, str, None)
+ The Name of the File System.
+
+ Either *filesystem_name* or *filesystem_id* is required for creation of the SMB share for filesystem.
+
+ If *filesystem_name* is specified, then *nas_server_name*/*nas_server_id* is required to uniquely identify the filesystem.
+
+ Options *filesystem_name* and *filesystem_id* are mutually exclusive parameters.
+
+
+ snapshot_name (optional, str, None)
+ The Name of the Filesystem Snapshot.
+
+ Either *snapshot_name* or *snapshot_id* is required for creation of the SMB share for a snapshot.
+
+ If *snapshot_name* is specified, then *nas_server_name*/*nas_server_id* is required to uniquely identify the snapshot.
+
+ Options *snapshot_name* and *snapshot_id* are mutually exclusive parameters.
+
+
+ nas_server_name (optional, str, None)
+ The Name of the NAS Server.
+
+ It is not required if *share_id* is used.
+
+ Options *nas_server_name* and *nas_server_id* are mutually exclusive parameters.
+
+
+ description (optional, str, None)
+ Description for the SMB share.
+
+ Optional parameter when creating a share.
+
+ To modify, pass the new value in the *description* field.
+
+
+ is_abe_enabled (optional, bool, None)
+ Indicates whether Access-based Enumeration (ABE) for SMB share is enabled.
+
+ During creation, if not mentioned then default is ``false``.
+
+
+ is_branch_cache_enabled (optional, bool, None)
+ Indicates whether Branch Cache optimization for SMB share is enabled.
+
+ During creation, if not mentioned then default is ``false``.
+
+
+ is_continuous_availability_enabled (optional, bool, None)
+ Indicates whether continuous availability for SMB 3.0 is enabled.
+
+ During creation, if not mentioned then default is ``false``.
+
+
+ is_encryption_enabled (optional, bool, None)
+ Indicates whether encryption for SMB 3.0 is enabled at the shared folder level.
+
+ During creation, if not mentioned then default is ``false``.
+
+
+ offline_availability (optional, str, None)
+ Defines the valid states of offline availability.
+
+ ``MANUAL`` - Only specified files will be available offline.
+
+ ``DOCUMENTS`` - All files that users open will be available offline.
+
+ ``PROGRAMS`` - Programs will preferably run from the offline cache even when connected to the network. All files that users open will be available offline.
+
+ ``NONE`` - Prevents clients from storing documents and programs in the offline cache.
+
+
+ umask (optional, str, None)
+ The default UNIX umask for new files created on the SMB Share.
+
+
+ state (True, str, None)
+ Define whether the SMB share should exist or not.
+
+ Value ``present`` indicates that the share should exist on the system.
+
+ Value ``absent`` indicates that the share should not exist on the system.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - When ID/Name of the filesystem/snapshot is passed then *nas_server* is not required. If passed, then filesystem/snapshot should exist for the mentioned *nas_server*, else the task will fail.
+ - The *check_mode* is not supported.
+ - The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ filesystem_name: "sample_fs"
+ nas_server_id: "NAS_11"
+ path: "/sample_fs"
+ description: "Sample SMB share created"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ offline_availability: "DOCUMENTS"
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+
+ - name: Modify Attributes of SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ nas_server_name: "sample_nas_server"
+ description: "Sample SMB share attributes updated"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: False
+ is_encryption_enabled: False
+ umask: "022"
+ state: "present"
+
+ - name: Create SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ nas_server_id: "NAS_11"
+ path: "/sample_snapshot"
+ description: "Sample SMB share created for snapshot"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+
+ - name: Modify Attributes of SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ description: "Sample SMB share attributes updated for snapshot"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: False
+ is_encryption_enabled: False
+ umask: "022"
+ state: "present"
+
+ - name: Get details of SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "present"
+
+ - name: Delete SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "absent"
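+
+ # Illustrative sketch, not one of the module's shipped examples: when
+ # *share_name* is used instead of *share_id*, the NAS server must also be
+ # identified. Values are placeholders.
+ - name: Get details of SMB share using share name
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ nas_server_name: "sample_nas_server"
+ state: "present"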
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+smb_share_details (When share exists., dict, {'creation_time': '2022-03-17 11:56:54.867000+00:00', 'description': '', 'existed': True, 'export_paths': ['\\\\multi-prot-pie.extreme1.com\\multi-prot-hui', '\\\\10.230.24.26\\multi-prot-hui'], 'filesystem': {'UnityFileSystem': {'hash': 8748426746492}}, 'filesystem_id': 'fs_140', 'filesystem_name': 'multi-prot-hui', 'hash': 8748426746588, 'id': 'SMBShare_20', 'is_abe_enabled': False, 'is_ace_enabled': False, 'is_branch_cache_enabled': False, 'is_continuous_availability_enabled': False, 'is_dfs_enabled': False, 'is_encryption_enabled': False, 'is_read_only': None, 'modified_time': '2022-03-17 11:56:54.867000+00:00', 'name': 'multi-prot-hui', 'nas_server_id': 'nas_5', 'nas_server_name': 'multi-prot', 'offline_availability': 'CifsShareOfflineAvailabilityEnum.NONE', 'path': '/', 'snap': None, 'type': 'CIFSTypeEnum.CIFS_SHARE', 'umask': '022'})
+ The SMB share details.
+
+
+ id (, str, )
+ The ID of the SMB share.
+
+
+ name (, str, sample_smb_share)
+ Name of the SMB share.
+
+
+ filesystem_id (, str, )
+ The ID of the Filesystem.
+
+
+ filesystem_name (, str, )
+ The Name of the filesystem.
+
+
+ snapshot_id (, str, )
+ The ID of the Snapshot.
+
+
+ snapshot_name (, str, )
+ The Name of the Snapshot.
+
+
+ nas_server_id (, str, )
+ The ID of the nas_server.
+
+
+ nas_server_name (, str, )
+ The Name of the nas_server.
+
+
+ description (, str, This share is created for demo purpose only.)
+ Additional information about the share.
+
+
+ is_abe_enabled (, bool, False)
+ Whether Access-based Enumeration (ABE) is enforced or not.
+
+
+ is_branch_cache_enabled (, bool, False)
+ Whether branch cache is enabled or not.
+
+
+ is_continuous_availability_enabled (, bool, False)
+ Whether the share will be available continuously or not.
+
+
+ is_encryption_enabled (, bool, False)
+ Whether encryption is enabled or not.
+
+
+ umask (, str, )
+ Unix mask for the SMB share.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/snapshot.rst b/ansible_collections/dellemc/unity/docs/modules/snapshot.rst
new file mode 100644
index 000000000..46b2aa997
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/snapshot.rst
@@ -0,0 +1,292 @@
+.. _snapshot_module:
+
+
+snapshot -- Manage snapshots on the Unity storage system
+========================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing snapshots on the Unity storage system includes creating a snapshot, deleting a snapshot, updating snapshot attributes, getting snapshot details, and mapping or unmapping a snapshot to or from a host.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ snapshot_name (optional, str, None)
+ The name of the snapshot.
+
+ Mandatory parameter for creating a snapshot.
+
+ For all other operations either *snapshot_name* or *snapshot_id* is required.
+
+
+ vol_name (optional, str, None)
+ The name of the volume for which snapshot is created.
+
+ For creation of a snapshot either *vol_name* or *cg_name* is required.
+
+ Not required for other operations.
+
+
+ cg_name (optional, str, None)
+ The name of the Consistency Group for which snapshot is created.
+
+ For creation of a snapshot either *vol_name* or *cg_name* is required.
+
+ Not required for other operations.
+
+
+ snapshot_id (optional, str, None)
+ The ID of the snapshot.
+
+ For all operations other than creation either *snapshot_name* or *snapshot_id* is required.
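+
+ A rename task driven by this ID is sketched after the shipped examples below.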
+
+
+ auto_delete (optional, bool, None)
+ This option specifies whether the snapshot is auto deleted or not.
+
+ If set to ``true``, snapshot will expire based on the pool auto deletion policy.
+
+ If set to ``false``, the snapshot will not be auto deleted based on the pool auto deletion policy.
+
+ Option *auto_delete* cannot be set to ``true`` if *expiry_time* is specified.
+
+ If during creation neither *auto_delete* nor *expiry_time* is mentioned then snapshot will be created keeping *auto_delete* as ``true``.
+
+ Once the *expiry_time* is set then snapshot cannot be assigned to the auto delete policy.
+
+
+ expiry_time (optional, str, None)
+ This option is for specifying the date and time after which the snapshot will expire.
+
+ The time is to be mentioned in UTC timezone.
+
+ The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+
+
+ description (optional, str, None)
+ The additional information about the snapshot can be provided using this option.
+
+
+ new_snapshot_name (optional, str, None)
+ New name for the snapshot.
+
+
+ state (True, str, None)
+ The *state* option is used to specify the existence of the snapshot.
+
+
+ host_name (optional, str, None)
+ The name of the host.
+
+ Either *host_name* or *host_id* is required to map or unmap a snapshot from a host.
+
+ Snapshot can be attached to multiple hosts.
+
+
+ host_id (optional, str, None)
+ The ID of the host.
+
+ Either *host_name* or *host_id* is required to map or unmap a snapshot from a host.
+
+ Snapshot can be attached to multiple hosts.
+
+
+ host_state (optional, str, None)
+ The *host_state* option is used to specify whether the snapshot is mapped to the host.
+
+ It is required when a snapshot is mapped or unmapped from host.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules present in this collection named as 'dellemc.unity' are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create a Snapshot for a CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ cg_name: "{{cg_name}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ description: "{{description}}"
+ auto_delete: False
+ state: "present"
+
+ - name: Create a Snapshot for a volume with Host attached
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ vol_name: "{{vol_name}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ description: "{{description}}"
+ expiry_time: "04/15/2025 16:30"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Unmap a host for a Snapshot
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Map snapshot to a host
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Update attributes of a Snapshot for a volume
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ new_snapshot_name: "{{new_snapshot_name}}"
+ description: "{{new_description}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Delete Snapshot of CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ state: "absent"
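+
+ # Illustrative sketch, not one of the module's shipped examples: operations
+ # other than creation can reference the snapshot by *snapshot_id* instead of
+ # its name. The ID below is a placeholder taken from the sample return value.
+ - name: Rename a Snapshot using snapshot id
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "85899355291"
+ new_snapshot_name: "{{new_snapshot_name}}"
+ state: "present"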
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+snapshot_details (When snapshot exists, dict, {'access_type': None, 'attached_wwn': None, 'creation_time': '2022-10-21 08:20:25.803000+00:00', 'creator_schedule': None, 'creator_type': 'SnapCreatorTypeEnum.USER_CUSTOM', 'creator_user': {'id': 'user_admin'}, 'description': 'Test snap creation', 'existed': True, 'expiration_time': None, 'hash': 8756689457056, 'hosts_list': [], 'id': '85899355291', 'io_limit_policy': None, 'is_auto_delete': True, 'is_modifiable': False, 'is_modified': False, 'is_read_only': True, 'is_system_snap': False, 'last_writable_time': None, 'lun': None, 'name': 'ansible_snap_cg_1_1', 'parent_snap': None, 'size': None, 'snap_group': None, 'state': 'SnapStateEnum.READY', 'storage_resource_id': 'res_95', 'storage_resource_name': 'CG_ansible_test_2_new'})
+ Details of the snapshot.
+
+
+ is_auto_delete (, bool, )
+ Indicates whether the snapshot is automatically deleted based on the pool auto deletion policy.
+
+
+ expiration_time (, str, )
+ Date and time after which the snapshot will expire.
+
+
+ hosts_list (, dict, )
+ Contains the name and id of the associated hosts.
+
+
+ id (, str, )
+ Unique identifier of the snapshot instance.
+
+
+ name (, str, )
+ The name of the snapshot.
+
+
+ storage_resource_name (, str, )
+ Name of the storage resource for which the snapshot exists.
+
+
+ storage_resource_id (, str, )
+ Id of the storage resource for which the snapshot exists.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst b/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst
new file mode 100644
index 000000000..4e9a37de2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst
@@ -0,0 +1,421 @@
+.. _snapshotschedule_module:
+
+
+snapshotschedule -- Manage snapshot schedules on Unity storage system
+=====================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing snapshot schedules on a Unity storage system includes creating a new snapshot schedule, getting details of a snapshot schedule, modifying its attributes, and deleting a snapshot schedule.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ name (optional, str, None)
+ The name of the snapshot schedule.
+
+ Name is mandatory for a create operation.
+
+ Specify either *name* or *id* (but not both) for any operation.
+
+
+ id (optional, str, None)
+ The ID of the snapshot schedule.
+
+
+ type (optional, str, None)
+ Type of the rule to be included in snapshot schedule.
+
+ Type is mandatory for any create or modify operation.
+
+ Once the snapshot schedule is created with one type, only parameters within that type can be modified; the rule type itself cannot be changed.
+
+
+ interval (optional, int, None)
+ Number of hours between snapshots.
+
+ Applicable only when rule type is ``every_n_hours``.
+
+
+ hours_of_day (optional, list, None)
+ Hours of the day when the snapshot will be taken.
+
+ Applicable only when rule type is ``every_day``.
+
+
+ day_interval (optional, int, None)
+ Number of days between snapshots.
+
+ Applicable only when rule type is ``every_n_days``.
+
+
+ days_of_week (optional, list, None)
+ Days of the week for which the snapshot schedule rule applies.
+
+ Applicable only when rule type is ``every_week``.
+
+
+ day_of_month (optional, int, None)
+ Day of the month for which the snapshot schedule rule applies.
+
+ Applicable only when rule type is ``every_month``.
+
+ Value should be [1, 31].
+
+
+ hour (optional, int, None)
+ The hour when the snapshot will be taken.
+
+ Applicable for ``every_n_days``, ``every_week``, ``every_month`` rule types.
+
+ For a create operation, if the *hour* parameter is not specified, the value will be taken as ``0``.
+
+ Value should be [0, 23].
+
+
+ minute (optional, int, None)
+ Minute offset from the hour when the snapshot will be taken.
+
+ Applicable for all rule types.
+
+ For a create operation, if the *minute* parameter is not specified, the value will be taken as ``0``.
+
+ Value should be [0, 59].
+
+
+ desired_retention (optional, int, None)
+ The number of days or hours for which the snapshot will be retained.
+
+ When *auto_delete* is ``true``, *desired_retention* cannot be specified.
+
+ Maximum desired retention supported is 31 days or 744 hours.
+
+
+ retention_unit (optional, str, hours)
+ The retention unit for the snapshot, either ``hours`` or ``days``.
+
+
+ auto_delete (optional, bool, None)
+ Indicates whether the system can automatically delete the snapshot.
+
+
+ state (True, str, None)
+ Define whether the snapshot schedule should exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - A snapshot schedule created through Ansible will have only one rule.
+ - Modification of rule type is not allowed. Within the same type, other parameters can be modified.
+ - If an existing snapshot schedule has more than one rule in it, only get and delete operations are allowed.
+ - The *check_mode* is not supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create snapshot schedule (Rule Type - every_n_hours)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ type: "every_n_hours"
+ interval: 6
+ desired_retention: 24
+ state: "{{state_present}}"
+
+ - name: Create snapshot schedule (Rule Type - every_day)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ hours_of_day:
+ - 8
+ - 14
+ auto_delete: True
+ state: "{{state_present}}"
+
+ - name: Create snapshot schedule (Rule Type - every_n_days)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Day_Testing"
+ type: "every_n_days"
+ day_interval: 2
+ desired_retention: 16
+ retention_unit: "days"
+ state: "{{state_present}}"
+
+ - name: Create snapshot schedule (Rule Type - every_week)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Week_Testing"
+ type: "every_week"
+ days_of_week:
+ - MONDAY
+ - FRIDAY
+ hour: 12
+ minute: 30
+ desired_retention: 200
+ state: "{{state_present}}"
+
+ - name: Create snapshot schedule (Rule Type - every_month)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Month_Testing"
+ type: "every_month"
+ day_of_month: 17
+ auto_delete: True
+ state: "{{state_present}}"
+
+ - name: Get snapshot schedule details using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ state: "{{state_present}}"
+
+ - name: Get snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_present}}"
+
+ - name: Modify snapshot schedule details id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ type: "every_n_hours"
+ interval: 8
+ state: "{{state_present}}"
+
+ - name: Modify snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ desired_retention: 200
+ auto_delete: False
+ state: "{{state_present}}"
+
+ - name: Delete snapshot schedule using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_absent}}"
+
+ - name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ state: "{{state_absent}}"
+
+
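+Because a snapshot schedule created through Ansible has only one rule, the rule's attributes can be read from the first element of ``rules`` in a registered result. A minimal sketch; the variable name ``schedule_result`` is illustrative:
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get snapshot schedule details and capture the output
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ state: "{{state_present}}"
+ register: schedule_result
+
+ - name: Show the rule type of the schedule
+ ansible.builtin.debug:
+ msg: "Rule type is {{ schedule_result.snapshot_schedule_details.rules[0].rule_type }}"
+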
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+snapshot_schedule_details (When snapshot schedule exists, dict, {'existed': True, 'hash': 8742032390151, 'id': 'snapSch_63', 'is_default': False, 'is_modified': None, 'is_sync_replicated': False, 'luns': None, 'modification_time': '2021-12-14 21:37:47.905000+00:00', 'name': 'SS7_empty_hour_SS', 'rules': [{'access_type': 'FilesystemSnapAccessTypeEnum.CHECKPOINT', 'days_of_month': None, 'days_of_week': {'DayOfWeekEnumList': []}, 'existed': True, 'hash': 8742032280772, 'hours': [0], 'id': 'SchedRule_109', 'interval': 2, 'is_auto_delete': False, 'minute': 0, 'retention_time': 86400, 'retention_time_in_hours': 24, 'rule_type': 'every_n_days', 'type': 'ScheduleTypeEnum.N_DAYS_AT_HHMM'}], 'storage_resources': None, 'version': 'ScheduleVersionEnum.LEGACY'})
+ Details of the snapshot schedule.
+
+
+ id (, str, )
+ The system ID given to the snapshot schedule.
+
+
+ name (, str, )
+ The name of the snapshot schedule.
+
+
+ luns (, dict, )
+ Details of volumes to which the snapshot schedule is applied.
+
+
+ UnityLunList (, list, )
+ List of volumes to which the snapshot schedule is applied.
+
+
+ UnityLun (, dict, )
+ Details of the volume.
+
+
+ id (, str, )
+ The system ID given to volume.
+
+
+
+
+
+ rules (, list, )
+ Details of rules that apply to the snapshot schedule.
+
+
+ id (, str, )
+ The system ID of the rule.
+
+
+ interval (, int, )
+ Number of days or hours between snapshots, depending on the rule type.
+
+
+ hours (, list, )
+ Hourly frequency for the snapshot schedule rule.
+
+
+ minute (, int, )
+ Minute frequency for the snapshot schedule rule.
+
+
+ days_of_week (, dict, )
+ Days of the week for which the snapshot schedule rule applies.
+
+
+ DayOfWeekEnumList (, list, )
+ Enumeration of days of the week.
+
+
+
+ days_of_month (, list, )
+ Days of the month for which the snapshot schedule rule applies.
+
+
+ retention_time (, int, )
+ Period of time in seconds for which to keep the snapshot.
+
+
+ retention_time_in_hours (, int, )
+ Period of time in hours for which to keep the snapshot.
+
+
+ rule_type (, str, )
+ Type of the rule applied to snapshot schedule.
+
+
+ is_auto_delete (, bool, )
+ Indicates whether the system can automatically delete the snapshot based on pool automatic-deletion thresholds.
+
+
+
+ storage_resources (, dict, )
+ Details of storage resources to which the snapshot schedule is applied.
+
+
+ UnityStorageResourceList (, list, )
+ List of storage resources to which the snapshot schedule is applied.
+
+
+ UnityStorageResource (, dict, )
+ Details of the storage resource.
+
+
+ id (, str, )
+ The system ID given to storage resource.
+
+
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/storagepool.rst b/ansible_collections/dellemc/unity/docs/modules/storagepool.rst
new file mode 100644
index 000000000..764f2a812
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/storagepool.rst
@@ -0,0 +1,361 @@
+.. _storagepool_module:
+
+
+storagepool -- Manage storage pool on Unity
+===========================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing storage pools on a Unity storage system includes getting details of a storage pool, creating a storage pool, and modifying a storage pool.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ pool_name (optional, str, None)
+ Name of the storage pool, unique in the storage system.
+
+
+ pool_id (optional, str, None)
+ Unique identifier of the pool instance.
+
+
+ new_pool_name (optional, str, None)
+ New name of the storage pool, unique in the storage system.
+
+
+ pool_description (optional, str, None)
+ The description of the storage pool.
+
+
+ fast_cache (optional, str, None)
+ Indicates whether the fast cache is enabled for the storage pool.
+
+ ``Enabled`` - FAST Cache is enabled for the pool.
+
+ ``Disabled`` - FAST Cache is disabled for the pool.
+
+
+ fast_vp (optional, str, None)
+ Indicates whether to enable scheduled data relocations for the pool.
+
+ ``Enabled`` - Enabled scheduled data relocations for the pool.
+
+ ``Disabled`` - Disabled scheduled data relocations for the pool.
+
+
+ raid_groups (optional, dict, None)
+ Parameters to create a RAID group from the disks and add it to the pool.
+
+
+ disk_group_id (optional, str, None)
+ Id of the disk group.
+
+
+ disk_num (optional, int, None)
+ Number of disks.
+
+
+ raid_type (optional, str, None)
+ RAID group types or RAID levels.
+
+
+ stripe_width (optional, str, None)
+ RAID group stripe widths, including parity or mirror disks.
+
+
+
+ alert_threshold (optional, int, None)
+ Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage.
+
+ Minimum threshold limit is 50.
+
+ Maximum threshold limit is 84.
+
+
+ is_harvest_enabled (optional, bool, None)
+ Enable/Disable automatic deletion of snapshots based on pool space usage.
+
+
+ pool_harvest_high_threshold (optional, float, None)
+ Max threshold for space used in pool beyond which the system automatically starts deleting snapshots in the pool.
+
+ Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+
+ Minimum pool harvest high threshold value is 1.
+
+ Maximum pool harvest high threshold value is 99.
+
+
+ pool_harvest_low_threshold (optional, float, None)
+ Min threshold for space used in pool below which the system automatically stops deletion of snapshots in the pool.
+
+ Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+
+ Minimum pool harvest low threshold value is 0.
+
+ Maximum pool harvest low threshold value is 98.
+
+
+ is_snap_harvest_enabled (optional, bool, None)
+ Enable/Disable automatic deletion of snapshots based on pool space usage.
+
+
+ snap_harvest_high_threshold (optional, float, None)
+ Max threshold for space used in snapshot beyond which the system automatically starts deleting snapshots in the pool.
+
+ Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+
+ Minimum snap harvest high threshold value is 1.
+
+ Maximum snap harvest high threshold value is 99.
+
+
+ snap_harvest_low_threshold (optional, float, None)
+ Min threshold for space used in snapshot below which the system will stop automatically deleting snapshots in the pool.
+
+ Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+
+ Minimum snap harvest low threshold value is 0.
+
+ Maximum snap harvest low threshold value is 98.
+
+
+ pool_type (optional, str, None)
+ Indicates storage pool type.
+
+
+ state (True, str, None)
+ Define whether the storage pool should exist or not.
+
+ ``present`` - indicates that the storage pool should exist on the system.
+
+ ``absent`` - indicates that the storage pool should not exist on the system.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Deletion of a storage pool is not allowed through the Ansible module.
+ - The *check_mode* is not supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get Storage pool details using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ state: "present"
+
+ - name: Get Storage pool details using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ state: "present"
+
+ - name: Modify Storage pool attributes using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+ - name: Modify Storage pool attributes using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+ - name: Create a StoragePool
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "Test"
+ pool_description: "test pool"
+ raid_groups:
+ disk_group_id : "dg_16"
+ disk_num : 2
+ raid_type : "RAID10"
+ stripe_width : "BEST_FIT"
+ alert_threshold : 50
+ is_harvest_enabled : True
+ pool_harvest_high_threshold : 60
+ pool_harvest_low_threshold : 40
+ is_snap_harvest_enabled : True
+ snap_harvest_high_threshold : 70
+ snap_harvest_low_threshold : 50
+ fast_vp: "enabled"
+ fast_cache: "enabled"
+ pool_type : "DYNAMIC"
+ state: "present"
+
+
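+The human-readable size fields documented under Return Values can be surfaced directly from a registered result. A minimal sketch; the variable name ``pool_result`` is illustrative:
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get storage pool details and report free space
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ state: "present"
+ register: pool_result
+
+ - name: Show free space in the pool
+ ansible.builtin.debug:
+ msg: "Pool {{ pool_result.storage_pool_details.name }} has {{ pool_result.storage_pool_details.size_free_with_unit }} free"
+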
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the storage pool has changed.
+
+
+storage_pool_details (When storage pool exists., dict, {'alert_threshold': 50, 'creation_time': '2022-03-08 14:05:32+00:00', 'description': '', 'drives': [{'disk_technology': 'SAS', 'id': 'dpe_disk_22', 'name': 'DPE Drive 22', 'size': 590860984320, 'tier_type': 'PERFORMANCE'}, {'disk_technology': 'SAS', 'id': 'dpe_disk_23', 'name': 'DPE Drive 23', 'size': 590860984320, 'tier_type': 'PERFORMANCE'}, {'disk_technology': 'SAS', 'id': 'dpe_disk_24', 'name': 'DPE Drive 24', 'size': 590860984320, 'tier_type': 'PERFORMANCE'}], 'existed': True, 'harvest_state': 'UsageHarvestStateEnum.IDLE', 'hash': 8744642897210, 'health': {'UnityHealth': {'hash': 8744642799842}}, 'id': 'pool_280', 'is_all_flash': False, 'is_empty': False, 'is_fast_cache_enabled': False, 'is_fast_vp_enabled': False, 'is_harvest_enabled': True, 'is_snap_harvest_enabled': True, 'metadata_size_subscribed': 105763569664, 'metadata_size_used': 57176752128, 'name': 'test_pool', 'object_id': 12884902146, 'pool_fast_vp': {'UnityPoolFastVp': {'hash': 8744647518980}}, 'pool_space_harvest_high_threshold': 59.0, 'pool_space_harvest_low_threshold': 40.0, 'pool_type': 'StoragePoolTypeEnum.DYNAMIC', 'raid_type': 'RaidTypeEnum.RAID10', 'rebalance_progress': None, 'size_free': 470030483456, 'size_free_with_unit': '437.75 GB', 'size_subscribed': 447215820800, 'size_subscribed_with_unit': '416.5 GB', 'size_total': 574720311296, 'size_total_with_unit': '535.25 GB', 'size_used': 76838068224, 'size_used_with_unit': '71.56 GB', 'snap_size_subscribed': 128851369984, 'snap_size_subscribed_with_unit': '120.0 GB', 'snap_size_used': 2351104, 'snap_size_used_with_unit': '2.24 MB', 'snap_space_harvest_high_threshold': 80.0, 'snap_space_harvest_low_threshold': 60.0, 'tiers': {'UnityPoolTierList': [{'disk_count': [0, 3, 0], 'existed': True, 'hash': 8744643017382, 'name': ['Extreme Performance', 'Performance', 'Capacity'], 'pool_units': [None, {'UnityPoolUnitList': [{'UnityPoolUnit': {'hash': 8744642786759, 'id': 'rg_4'}}, {'UnityPoolUnit': {'hash': 8744642786795, 'id': 'rg_5'}}]}, None], 'raid_type': ['RaidTypeEnum.NONE', 'RaidTypeEnum.RAID10', 'RaidTypeEnum.NONE'], 'size_free': [0, 470030483456, 0], 'size_moving_down': [0, 0, 0], 'size_moving_up': [0, 0, 0], 'size_moving_within': [0, 0, 0], 'size_total': [0, 574720311296, 0], 'size_used': [0, 104689827840, 0], 'stripe_width': [None, 'RaidStripeWidthEnum._2', None], 'tier_type': ['TierTypeEnum.EXTREME_PERFORMANCE', 'TierTypeEnum.PERFORMANCE', 'TierTypeEnum.CAPACITY']}]}})
+ The storage pool details.
+
+
+ id (, str, )
+ Pool id, unique identifier of the pool.
+
+
+ name (, str, )
+ Pool name, unique in the storage system.
+
+
+ is_fast_cache_enabled (, bool, )
+ Indicates whether the fast cache is enabled for the storage pool. ``true`` - FAST Cache is enabled for the pool. ``false`` - FAST Cache is disabled for the pool.
+
+
+ is_fast_vp_enabled (, bool, )
+ Indicates whether to enable scheduled data relocations for the storage pool. ``true`` - Enabled scheduled data relocations for the pool. ``false`` - Disabled scheduled data relocations for the pool.
+
+
+ size_free_with_unit (, str, )
+ Indicates size_free with its appropriate unit in human readable form.
+
+
+ size_subscribed_with_unit (, str, )
+ Indicates size_subscribed with its appropriate unit in human readable form.
+
+
+ size_total_with_unit (, str, )
+ Indicates size_total with its appropriate unit in human readable form.
+
+
+ size_used_with_unit (, str, )
+ Indicates size_used with its appropriate unit in human readable form.
+
+
+ snap_size_subscribed_with_unit (, str, )
+ Indicates snap_size_subscribed with its appropriate unit in human readable form.
+
+
+ snap_size_used_with_unit (, str, )
+ Indicates snap_size_used with its appropriate unit in human readable form.
+
+
+ drives (, list, )
+ Indicates information about the drives associated with the storage pool.
+
+
+ id (, str, )
+ Unique identifier of the drive.
+
+
+ name (, str, )
+ Indicates name of the drive.
+
+
+ size (, str, )
+ Indicates size of the drive.
+
+
+ disk_technology (, str, )
+ Indicates disk technology of the drive.
+
+
+ tier_type (, str, )
+ Indicates tier type of the drive.
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Ambuj Dubey (@AmbujDube) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst b/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst
new file mode 100644
index 000000000..285ab9d79
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst
@@ -0,0 +1,310 @@
+.. _tree_quota_module:
+
+
+tree_quota -- Manage quota tree on the Unity storage system
+===========================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing quota trees on the Unity storage system includes creating, getting details of, modifying, and deleting quota trees.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ filesystem_name (optional, str, None)
+ The name of the filesystem for which quota tree is created.
+
+ For creation or modification of a quota tree either *filesystem_name* or *filesystem_id* is required.
+
+
+ filesystem_id (optional, str, None)
+ The ID of the filesystem for which the quota tree is created.
+
+ For creation of a quota tree either *filesystem_id* or *filesystem_name* is required.
+
+
+ nas_server_name (optional, str, None)
+ The name of the NAS server in which the filesystem is created.
+
+ For creation of a quota tree either *nas_server_name* or *nas_server_id* is required.
+
+
+ nas_server_id (optional, str, None)
+ The ID of the NAS server in which the filesystem is created.
+
+ For creation of a quota tree either *nas_server_id* or *nas_server_name* is required.
+
+
+ tree_quota_id (optional, str, None)
+ The ID of the quota tree.
+
+ Either *tree_quota_id* or *path* to quota tree is required to view/modify/delete quota tree.
+
+
+ path (optional, str, None)
+ The path to the quota tree.
+
+ Either *tree_quota_id* or *path* to quota tree is required to create/view/modify/delete a quota tree.
+
+ Path must start with a forward slash '/'.
+
+
+ hard_limit (optional, int, None)
+ Hard limitation for a quota tree on the total space available. If exceeded, users in quota tree cannot write data.
+
+ Value ``0`` implies no limit.
+
+ Either *soft_limit* or *hard_limit* can be ``0``; however, both cannot be ``0`` during creation of a quota tree.
+
+
+ soft_limit (optional, int, None)
+ Soft limitation for a quota tree on the total space available. If exceeded, notification will be sent to users in the quota tree for the grace period mentioned, beyond which users cannot use space.
+
+ Value ``0`` implies no limit.
+
+ Both *soft_limit* and *hard_limit* cannot be ``0`` during creation of quota tree.
+
+
+ cap_unit (optional, str, None)
+ Unit of *soft_limit* and *hard_limit* size.
+
+ It defaults to ``GB`` if not specified.
+
+
+ description (optional, str, None)
+ Description of a quota tree.
+
+
+ state (True, str, None)
+ The *state* option is used to specify the existence of the quota tree.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get quota tree details by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "present"
+
+ - name: Get quota tree details by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ path: "/test"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem name
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Modify quota tree limit usage by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ path: "/test_new"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "absent"
+
+ - name: Delete quota tree by path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/test_new"
+ state: "absent"
+
+
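+A registered result exposes the quota tree limits documented under Return Values. A minimal sketch; the variable name ``quota_result`` is illustrative:
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get quota tree details and capture the output
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "present"
+ register: quota_result
+
+ - name: Show the hard limit of the quota tree
+ ansible.builtin.debug:
+ msg: "Hard limit is {{ quota_result.get_tree_quota_details.hard_limit }}"
+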
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+get_tree_quota_details (When quota tree exists, dict, {'description': '', 'existed': True, 'filesystem': {'UnityFileSystem': {'hash': 8788549469862, 'id': 'fs_137', 'name': 'test', 'nas_server': {'id': 'nas_1', 'name': 'lglad072'}}}, 'gp_left': None, 'hard_limit': '6.0 TB', 'hash': 8788549497558, 'id': 'treequota_171798694897_1', 'path': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'size_used': 0, 'soft_limit': '5.0 TB', 'state': 0})
+ Details of the quota tree.
+
+
+ filesystem (, dict, )
+ Filesystem details for which the quota tree is created.
+
+
+ UnityFileSystem (, dict, )
+ Filesystem details for which the quota tree is created.
+
+
+ id (, str, )
+ ID of the filesystem for which the quota tree is created.
+
+
+
+
+ description (, str, )
+ Description of the quota tree.
+
+
+ path (, str, )
+ Path to quota tree. A valid path must start with a forward slash '/'. It is mandatory while creating a quota tree.
+
+
+ hard_limit (, int, )
+ Hard limit of quota tree. If the quota tree's space usage exceeds the hard limit, users in quota tree cannot write data.
+
+
+ soft_limit (, int, )
+ Soft limit of the quota tree. If the quota tree's space usage exceeds the soft limit, the storage system starts to count down based on the specified grace period.
+
+
+ id (, str, )
+ Quota tree ID.
+
+
+ size_used (, int, )
+ Size of the space used in the filesystem by the user files.
+
+
+ gp_left (, int, )
+ The grace period left after the soft limit for the quota tree is exceeded.
+
+
+ state (, int, )
+ State of the quota tree.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/user_quota.rst b/ansible_collections/dellemc/unity/docs/modules/user_quota.rst
new file mode 100644
index 000000000..7d0bbb808
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/user_quota.rst
@@ -0,0 +1,456 @@
+.. _user_quota_module:
+
+
+user_quota -- Manage user quota on the Unity storage system
+===========================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing user quotas on the Unity storage system includes creating, getting details of, modifying, and deleting user quotas, as well as creating, modifying, and deleting user quotas for a quota tree.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ filesystem_name (optional, str, None)
+ The name of the filesystem for which the user quota is created.
+
+ For creation of a user quota either *filesystem_name* or *filesystem_id* is required.
+
+
+ filesystem_id (optional, str, None)
+ The ID of the filesystem for which the user quota is created.
+
+ For creation of a user quota either *filesystem_id* or *filesystem_name* is required.
+
+
+ nas_server_name (optional, str, None)
+ The name of the NAS server in which the filesystem is created.
+
+ For creation of a user quota either *nas_server_name* or *nas_server_id* is required.
+
+
+ nas_server_id (optional, str, None)
+ The ID of the NAS server in which the filesystem is created.
+
+ For creation of a user quota either *nas_server_id* or *nas_server_name* is required.
+
+
+ hard_limit (optional, int, None)
+ Hard limitation for a user on the total space available. If exceeded, user cannot write data.
+
+ Value ``0`` implies no limit.
+
+ Either *soft_limit* or *hard_limit* can be ``0``; however, both cannot be ``0`` during creation or modification of a user quota.
+
+
+ soft_limit (optional, int, None)
+ Soft limitation for a user on the total space available. If exceeded, notification will be sent to the user for the grace period mentioned, beyond which the user cannot use space.
+
+ Value ``0`` implies no limit.
+
+ Both *soft_limit* and *hard_limit* cannot be ``0`` during creation or modification of user quota.
+
+
+ cap_unit (optional, str, None)
+ Unit of *soft_limit* and *hard_limit* size.
+
+ It defaults to ``GB`` if not specified.
+
+
+ user_type (optional, str, None)
+ Type of the user for whom the user quota is created.
+
+ Mandatory while creating or modifying user quota.
+
+
+ win_domain (optional, str, None)
+ Fully qualified or short domain name for Windows user type.
+
+ Mandatory when *user_type* is ``Windows``.
+
+
+ user_name (optional, str, None)
+ User name of the user quota when *user_type* is ``Windows`` or ``Unix``.
+
+ Option *user_name* must be specified along with *win_domain* when *user_type* is ``Windows``.
+
+
+ uid (optional, str, None)
+ User ID of the user quota.
+
+
+ user_quota_id (optional, str, None)
+ User quota ID generated after creation of a user quota.
+
+
+ tree_quota_id (optional, str, None)
+ The ID of the quota tree.
+
+ Either *tree_quota_id* or *path* to quota tree is required to create/modify/delete user quota for a quota tree.
+
+
+ path (optional, str, None)
+ The path to the quota tree.
+
+ Either *tree_quota_id* or *path* to quota tree is required to create/modify/delete user quota for a quota tree.
+
+ Path must start with a forward slash '/'.
+
+
+ state (True, str, None)
+ The *state* option is used to specify the existence of the user quota.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get user quota details by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ state: "present"
+
+ - name: Get user quota details by user quota uid/user name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ user_name: "test"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Modify user quota limit usage by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify user quota by filesystem id and user quota uid/user_name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete user quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Create user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 9
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Create user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Unix"
+ user_name: "test"
+ hard_limit: 2
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 10
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Delete user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Delete user quota of a quota tree by quota tree id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+
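+A registered result exposes the usage fields documented under Return Values. A minimal sketch; the variable name ``uq_result`` is illustrative:
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get user quota details and capture the output
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ state: "present"
+ register: uq_result
+
+ - name: Show the space used by the user quota
+ ansible.builtin.debug:
+ msg: "User quota {{ uq_result.get_user_quota_details.id }} has size_used {{ uq_result.get_user_quota_details.size_used }}"
+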
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+get_user_quota_details (When user quota exists, dict, {'existed': True, 'filesystem': {'UnityFileSystem': {'hash': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', 'id': 'fs_120', 'name': 'nfs-multiprotocol', 'nas_server': {'id': 'nas_1', 'name': 'lglad072'}}}, 'gp_left': None, 'hard_limit': '10.0 GB', 'hard_ratio': None, 'hash': 8752448438089, 'id': 'userquota_171798694698_0_60000', 'size_used': 0, 'soft_limit': '10.0 GB', 'soft_ratio': None, 'state': 0, 'tree_quota': None, 'uid': 60000, 'unix_name': None, 'windows_names': None, 'windows_sids': None})
+ Details of the user quota.
+
+
+ filesystem (, dict, )
+ Filesystem details for which the user quota is created.
+
+
+ UnityFileSystem (, dict, )
+ Filesystem details for which the user quota is created.
+
+
+ id (, str, )
+ ID of the filesystem for which the user quota is created.
+
+
+ name (, str, )
+ Name of filesystem.
+
+
+ nas_server (, dict, )
+ NAS server details where the filesystem is created.
+
+
+ name (, str, )
+ Name of the NAS server.
+
+
+ id (, str, )
+ ID of the NAS server.
+
+
+
+
+
+ tree_quota (, dict, )
+ Quota tree details for which the user quota is created.
+
+
+ UnityTreeQuota (, dict, )
+ Quota tree details for which the user quota is created.
+
+
+ id (, str, )
+ ID of the quota tree.
+
+
+ path (, str, )
+ Path to quota tree.
+
+
+
+
+ gp_left (, int, )
+ The grace period left after the soft limit for the user quota is exceeded.
+
+
+ hard_limit (, int, )
+ Hard limitation for a user on the total space available. If exceeded, user cannot write data.
+
+
+ hard_ratio (, str, )
+ The hard ratio is the ratio between the hard limit size of the user quota and the amount of storage actually consumed.
+
+
+ soft_limit (, int, )
+ Soft limitation for a user on the total space available. If exceeded, notification will be sent to user for the grace period mentioned, beyond which user cannot use space.
+
+
+ soft_ratio (, str, )
+ The soft ratio is the ratio between the soft limit size of the user quota and the amount of storage actually consumed.
+
+
+ id (, str, )
+ User quota ID.
+
+
+ size_used (, int, )
+ Size of the space used in the filesystem by the user files.
+
+
+ state (, int, )
+ State of the user quota.
+
+
+ uid (, int, )
+ User ID of the user.
+
+
+ unix_name (, str, )
+ Unix user name for this user quota's uid.
+
+
+ windows_names (, str, )
+ Windows user name that maps to this quota's uid.
+
+
+ windows_sids (, str, )
+ Windows SIDs that map to this user quota's uid.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/docs/modules/volume.rst b/ansible_collections/dellemc/unity/docs/modules/volume.rst
new file mode 100644
index 000000000..ed4c5f202
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/volume.rst
@@ -0,0 +1,381 @@
+.. _volume_module:
+
+
+volume -- Manage volume on Unity storage system
+===============================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing volumes on a Unity storage system includes creating a new volume, modifying volume attributes, mapping a volume to a host, unmapping a volume from a host, displaying volume details, and deleting a volume.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.12 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ vol_name (optional, str, None)
+ The name of the volume. Mandatory only for create operation.
+
+
+ vol_id (optional, str, None)
+ The id of the volume.
+
+ It can be used only for get, modify, map/unmap host, or delete operations.
+
+
+ pool_name (optional, str, None)
+ This is the name of the pool where the volume will be created.
+
+ Either the *pool_name* or *pool_id* must be provided to create a new volume.
+
+
+ pool_id (optional, str, None)
+ This is the id of the pool where the volume will be created.
+
+ Either the *pool_name* or *pool_id* must be provided to create a new volume.
+
+
+ size (optional, int, None)
+ The size of the volume.
+
+
+ cap_unit (optional, str, None)
+ The unit of the volume size. It defaults to ``GB``, if not specified.
+
+
+ description (optional, str, None)
+ Description about the volume.
+
+ Description can be removed by passing an empty string ("").
+
+
+ snap_schedule (optional, str, None)
+ Snapshot schedule assigned to the volume.
+
+ Add/Remove/Modify the snapshot schedule for the volume.
+
+
+ compression (optional, bool, None)
+ Boolean variable that specifies whether or not to enable compression. Compression is supported only for thin volumes.
+
+
+ advanced_dedup (optional, bool, None)
+ Boolean variable that indicates whether or not to enable advanced deduplication.
+
+ Compression should be enabled to enable advanced deduplication.
+
+ It can only be enabled on all-flash, high-end platforms.
+
+ Deduplicated data will remain as is even after advanced deduplication is disabled.
+
+
+ is_thin (optional, bool, None)
+ Boolean variable that specifies whether or not it is a thin volume.
+
+ The value is set as ``true`` by default if not specified.
+
+
+ sp (optional, str, None)
+ Storage Processor for this volume.
+
+
+ io_limit_policy (optional, str, None)
+ IO limit policy associated with this volume. Once set, it cannot be removed through the Ansible module, but it can be changed.
+
+
+ host_name (optional, str, None)
+ Name of the host to be mapped/unmapped with this volume.
+
+ Either *host_name* or *host_id* can be specified in one task along with *mapping_state*.
+
+
+ host_id (optional, str, None)
+ ID of the host to be mapped/unmapped with this volume.
+
+ Either *host_name* or *host_id* can be specified in one task along with *mapping_state*.
+
+
+ hlu (optional, int, None)
+ Host Lun Unit to be mapped/unmapped with this volume.
+
+ It is an optional parameter; *hlu* can be specified along with *host_name* or *host_id* and *mapping_state*.
+
+ If *hlu* is not specified, Unity will choose it automatically. The maximum value supported is ``255``.
+
+
+ mapping_state (optional, str, None)
+ State of host access for volume.
+
+
+ new_vol_name (optional, str, None)
+ New name of the volume for rename operation.
+
+
+ tiering_policy (optional, str, None)
+ Tiering policy choices for how the storage resource data will be distributed among the tiers available in the pool.
+
+
+ state (True, str, None)
+ State variable to determine whether the volume will exist or not.
+
+
+ hosts (optional, list, None)
+ Name of hosts for mapping to a volume.
+
+
+ host_name (optional, str, None)
+ Name of the host.
+
+
+ host_id (optional, str, None)
+ ID of the host.
+
+
+ hlu (optional, str, None)
+ Host Lun Unit to be mapped/unmapped with this volume.
+
+ It is an optional parameter; *hlu* can be specified along with *host_name* or *host_id* and *mapping_state*.
+
+ If *hlu* is not specified, Unity will choose it automatically. The maximum value supported is ``255``.
+
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is not supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ description: "{{description}}"
+ pool_name: "{{pool}}"
+ size: 2
+ cap_unit: "{{cap_GB}}"
+ is_thin: True
+ compression: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+ - name: Expand Volume by volume id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ size: 5
+ cap_unit: "{{cap_GB}}"
+ state: "{{state_present}}"
+
+ - name: Modify Volume, map host by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ hlu: 5
+ mapping_state: "{{state_mapped}}"
+ state: "{{state_present}}"
+
+ - name: Modify Volume, unmap host mapping by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ mapping_state: "{{state_unmapped}}"
+ state: "{{state_present}}"
+
+ - name: Map multiple hosts to a Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ hlu: 1
+ - host_id: "Host_929"
+ hlu: 2
+ mapping_state: "mapped"
+ state: "present"
+
+ - name: Modify Volume attributes
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ new_vol_name: "{{new_vol_name}}"
+ tiering_policy: "AUTOTIER"
+ compression: True
+ is_thin: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+ - name: Delete Volume by vol name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ state: "{{state_absent}}"
+
+ - name: Delete Volume by vol id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ state: "{{state_absent}}"
+
+
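+The ``host_access`` list documented under Return Values can be inspected from a registered result, for example to list the hosts currently mapped to a volume. A minimal sketch; the variable name ``vol_result`` is illustrative:
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get volume details and capture the output
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ state: "present"
+ register: vol_result
+
+ - name: List hosts mapped to the volume
+ ansible.builtin.debug:
+ msg: "{{ vol_result.volume_details.host_access | map(attribute='name') | list }}"
+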
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+volume_details (When volume exists, dict, {'current_node': 'NodeEnum.SPB', 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'default_node': 'NodeEnum.SPB', 'description': None, 'effective_io_limit_max_iops': None, 'effective_io_limit_max_kbps': None, 'existed': True, 'family_base_lun': {'UnityLun': {'hash': 8774954523796, 'id': 'sv_27'}}, 'family_clone_count': 0, 'hash': 8774954522426, 'health': {'UnityHealth': {'hash': 8774954528278}}, 'host_access': [{'accessMask': 'PRODUCTION', 'hlu': 0, 'id': 'Host_75', 'name': '10.226.198.250'}], 'id': 'sv_27', 'io_limit_policy': None, 'is_advanced_dedup_enabled': False, 'is_compression_enabled': None, 'is_data_reduction_enabled': False, 'is_replication_destination': False, 'is_snap_schedule_paused': False, 'is_thin_clone': False, 'is_thin_enabled': False, 'metadata_size': 4294967296, 'metadata_size_allocated': 4026531840, 'name': 'VSI-UNITY-test-task', 'per_tier_size_used': [111400714240, 0, 0], 'pool': {'id': 'pool_3', 'name': 'Extreme_Perf_tier'}, 'size_allocated': 107374182400, 'size_total': 107374182400, 'size_total_with_unit': '100.0 GB', 'size_used': None, 'snap_count': 0, 'snap_schedule': None, 'snap_wwn': '60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97', 'snaps_size': 0, 'snaps_size_allocated': 0, 'storage_resource': {'UnityStorageResource': {'hash': 8774954518887}}, 'tiering_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'type': 'LUNTypeEnum.VMWARE_ISCSI', 'wwn': '60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2'})
+ Details of the volume.
+
+
+ id (, str, )
+ The system generated ID given to the volume.
+
+
+ name (, str, )
+ Name of the volume.
+
+
+ description (, str, )
+ Description about the volume.
+
+
+ is_data_reduction_enabled (, bool, )
+ Whether or not compression is enabled on this volume.
+
+
+ size_total_with_unit (, str, )
+ Size of the volume with actual unit.
+
+
+ snap_schedule (, dict, )
+ Snapshot schedule applied to this volume.
+
+
+ tiering_policy (, str, )
+ Tiering policy applied to this volume.
+
+
+ current_sp (, str, )
+ Current storage processor for this volume.
+
+
+ pool (, dict, )
+ The pool in which this volume is allocated.
+
+
+ host_access (, list, )
+ Host mapped to this volume.
+
+
+ io_limit_policy (, dict, )
+ IO limit policy associated with this volume.
+
+
+ wwn (, str, )
+ The world wide name of this volume.
+
+
+ is_thin_enabled (, bool, )
+ Indicates whether thin provisioning is enabled for this volume.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Arindam Datta (@arindam-emc) <ansible.team@dell.com>
+- Pavan Mudunuri(@Pavan-Mudunuri) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/unity/meta/execution-environment.yml b/ansible_collections/dellemc/unity/meta/execution-environment.yml
new file mode 100644
index 000000000..5aa14625e
--- /dev/null
+++ b/ansible_collections/dellemc/unity/meta/execution-environment.yml
@@ -0,0 +1,5 @@
+---
+version: 1
+dependencies:
+ galaxy: requirements.yml
+ python: requirements.txt
diff --git a/ansible_collections/dellemc/unity/meta/runtime.yml b/ansible_collections/dellemc/unity/meta/runtime.yml
new file mode 100644
index 000000000..31f912445
--- /dev/null
+++ b/ansible_collections/dellemc/unity/meta/runtime.yml
@@ -0,0 +1,79 @@
+---
+requires_ansible: ">=2.12"
+plugin_routing:
+ modules:
+ dellemc_unity_info:
+ redirect: dellemc.unity.info
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use info instead.
+ dellemc_unity_gatherfacts:
+ redirect: dellemc.unity.info
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use info instead.
+ dellemc_unity_consistencygroup:
+ redirect: dellemc.unity.consistencygroup
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use consistencygroup instead.
+ dellemc_unity_filesystem_snapshot:
+ redirect: dellemc.unity.filesystem_snapshot
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use filesystem_snapshot instead.
+ dellemc_unity_filesystem:
+ redirect: dellemc.unity.filesystem
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use filesystem instead.
+ dellemc_unity_host:
+ redirect: dellemc.unity.host
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use host instead.
+ dellemc_unity_nasserver:
+ redirect: dellemc.unity.nasserver
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use nasserver instead.
+ dellemc_unity_nfs:
+ redirect: dellemc.unity.nfs
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use nfs instead.
+ dellemc_unity_smbshare:
+ redirect: dellemc.unity.smbshare
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use smbshare instead.
+ dellemc_unity_snapshot:
+ redirect: dellemc.unity.snapshot
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use snapshot instead.
+ dellemc_unity_snapshotschedule:
+ redirect: dellemc.unity.snapshotschedule
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use snapshotschedule instead.
+ dellemc_unity_storagepool:
+ redirect: dellemc.unity.storagepool
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use storagepool instead.
+ dellemc_unity_tree_quota:
+ redirect: dellemc.unity.tree_quota
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use tree_quota instead.
+ dellemc_unity_user_quota:
+ redirect: dellemc.unity.user_quota
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use user_quota instead.
+ dellemc_unity_volume:
+ redirect: dellemc.unity.volume
+ deprecation:
+ removal_date: "2024-03-31"
+ warning_text: Use volume instead.
diff --git a/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py
new file mode 100644
index 000000000..1ebc7f40f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py
@@ -0,0 +1,53 @@
+# Copyright: (c) 2020, Dell Technologies.
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Documentation fragment for Unity (unity)
+ DOCUMENTATION = r'''
+ options:
+ unispherehost:
+ required: true
+ description:
+ - IP or FQDN of the Unity management server.
+ type: str
+ username:
+ type: str
+ required: true
+ description:
+ - The username of the Unity management server.
+ password:
+ type: str
+ required: true
+ description:
+ - The password of the Unity management server.
+ validate_certs:
+ type: bool
+ default: true
+ aliases:
+ - verifycert
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ port:
+ description:
+ - Port number through which communication happens with Unity
+ management server.
+ type: int
+ default: 443
+ requirements:
+ - A Dell Unity Storage device version 5.1 or later.
+ - Ansible-core 2.12 or later.
+ - Python 3.9, 3.10 or 3.11.
+ - Storops Python SDK 1.2.11.
+ notes:
+      - The modules in this collection, named 'dellemc.unity',
+        are built to support the Dell Unity storage platform.
+'''
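
Modules opt into these shared connection options by listing the fragment
under extends_documentation_fragment. A hypothetical module docstring (the
module name and option are illustrative only) sketching the mechanism:

DOCUMENTATION = r'''
module: example_module
short_description: Illustrates reuse of the dellemc.unity.unity fragment
description:
- Inherits unispherehost, username, password, validate_certs and port
  from the shared documentation fragment.
extends_documentation_fragment:
  - dellemc.unity.unity
options:
  resource_name:
    description:
    - Name of the resource to manage (illustrative only).
    type: str
'''
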
diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/__init__.py
diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py
new file mode 100644
index 000000000..232814e54
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/logging_handler.py
@@ -0,0 +1,25 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Custom rotating file handler for Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime
+from logging.handlers import RotatingFileHandler
+
+
+class CustomRotatingFileHandler(RotatingFileHandler):
+ def rotation_filename(self, default_name):
+        """
+        Modify the filename of a log file when rotating.
+        :param default_name: The default name of the log file,
+                             expected in <base>.<extension>.<count> form.
+        :return: The rotated file name, with the current date (YYYYMMDD)
+                 stamped into the base name.
+        """
+ src_file_name = default_name.split('.')
+ dest_file_name = "{0}_{1}.{2}.{3}".format(
+ src_file_name[0], '{0:%Y%m%d}'.format(datetime.now()),
+ src_file_name[1], src_file_name[2]
+ )
+ return dest_file_name
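
A minimal usage sketch for the handler above; it runs without a Unity
system and assumes only that the collection is importable (for example,
the installed collections directory is on PYTHONPATH):

from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell.logging_handler \
    import CustomRotatingFileHandler

# delay=True postpones opening the log file until something is written.
handler = CustomRotatingFileHandler('ansible_unity.log',
                                    maxBytes=5 * 1024 * 1024,
                                    backupCount=5, delay=True)

# Stamps the current date into the base name while keeping the rotation
# suffix; on 2024-04-13 this prints 'ansible_unity_20240413.log.1'.
print(handler.rotation_filename('ansible_unity.log.1'))
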
diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py
new file mode 100644
index 000000000..c44b2bcee
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py
@@ -0,0 +1,254 @@
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import logging
+from decimal import Decimal
+import re
+import traceback
+import math
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell.logging_handler \
+ import CustomRotatingFileHandler
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ import urllib3
+
+ urllib3.disable_warnings()
+ HAS_URLLIB3, URLLIB3_IMP_ERR = True, None
+except ImportError:
+ HAS_URLLIB3, URLLIB3_IMP_ERR = False, traceback.format_exc()
+
+try:
+ from storops import UnitySystem
+ from storops.unity.client import UnityClient
+ from storops.unity.resource import host, cg, snap_schedule, snap, \
+ cifs_share, nas_server
+ from storops.unity.resource.lun import UnityLun
+ from storops.unity.resource.pool import UnityPool, UnityPoolList, RaidGroupParameter
+ from storops.unity.resource.filesystem import UnityFileSystem, \
+ UnityFileSystemList
+ from storops.unity.resource.nas_server import UnityNasServer
+ from storops.unity.resource.nfs_share import UnityNfsShare, \
+ UnityNfsShareList
+ from storops.unity.resource.snap_schedule import UnitySnapScheduleList, \
+ UnitySnapSchedule
+ from storops.unity.resource.replication_session import UnityReplicationSession
+ from storops.unity.enums import HostInitiatorTypeEnum, \
+ TieringPolicyEnum, ScheduleTypeEnum, DayOfWeekEnum, NodeEnum, \
+ HostLUNAccessEnum, HostTypeEnum, AccessPolicyEnum, \
+ FilesystemTypeEnum, FSSupportedProtocolEnum, FSFormatEnum, \
+ NFSTypeEnum, NFSShareDefaultAccessEnum, NFSShareSecurityEnum, \
+ FilesystemSnapAccessTypeEnum, FSLockingPolicyEnum, \
+ CifsShareOfflineAvailabilityEnum, NasServerUnixDirectoryServiceEnum, \
+        KdcTypeEnum, FileInterfaceRoleEnum
+ from storops.exception import UnityResourceNotFoundError, \
+ StoropsConnectTimeoutError, UnityNfsShareNameExistedError
+ from storops.connection.exceptions import HttpError, HTTPClientError
+ from storops.unity.resource.user_quota import UnityUserQuota, \
+ UnityUserQuotaList
+ from storops.unity.resource.tree_quota import UnityTreeQuota, \
+ UnityTreeQuotaList
+ from storops.unity.resource.quota_config import UnityQuotaConfig, \
+ UnityQuotaConfigList
+ from storops.unity.resource.storage_resource import UnityStorageResource
+ from storops.unity.enums import QuotaPolicyEnum, RaidTypeEnum, \
+ RaidStripeWidthEnum, StoragePoolTypeEnum
+ from storops.unity.resource.disk import UnityDisk, \
+ UnityDiskList, UnityDiskGroup, UnityDiskGroupList
+ from storops.unity.resource.cifs_server import UnityCifsServer
+ from storops.unity.resource.nfs_server import UnityNfsServer
+ from storops.unity.resource.interface import UnityFileInterface
+
+ HAS_UNITY_SDK, STOROPS_IMP_ERR = True, None
+except ImportError:
+ HAS_UNITY_SDK, STOROPS_IMP_ERR = False, traceback.format_exc()
+
+try:
+ from pkg_resources import parse_version
+ import pkg_resources
+
+ HAS_PKG_RESOURCE, PKG_RESOURCE_IMP_ERR = True, None
+except ImportError:
+ HAS_PKG_RESOURCE, PKG_RESOURCE_IMP_ERR = False, traceback.format_exc()
+
+
+def ensure_required_libs(module):
+ """Check required libraries"""
+
+ if not HAS_UNITY_SDK:
+ module.fail_json(msg=missing_required_lib("storops"),
+ exception=STOROPS_IMP_ERR)
+
+ if not HAS_PKG_RESOURCE:
+ module.fail_json(msg=missing_required_lib("pkg_resources"),
+ exception=PKG_RESOURCE_IMP_ERR)
+
+ if not HAS_URLLIB3:
+ module.fail_json(msg=missing_required_lib("urllib3"),
+ exception=URLLIB3_IMP_ERR)
+
+ min_ver = '1.2.11'
+ try:
+ curr_version = pkg_resources.require("storops")[0].version
+ except Exception as err:
+ module.fail_json(msg="Failed to get Storops SDK version - "
+ "{0}".format(str(err)))
+
+ if parse_version(curr_version) < parse_version(min_ver):
+ module.fail_json(msg="Storops {0} is not supported. "
+ "Required minimum version is "
+ "{1}".format(curr_version, min_ver))
+
+
+def get_unity_management_host_parameters():
+    """Provides the common access parameters required by the
+    Ansible modules on a Unity storage system"""
+
+ return dict(
+ unispherehost=dict(type='str', required=True, no_log=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False,
+ aliases=['verifycert'], default=True),
+ port=dict(type='int', required=False, default=443, no_log=True)
+ )
+
+
+def get_unity_unisphere_connection(module_params, application_type=None):
+ """Establishes connection with Unity array using storops SDK"""
+
+ if HAS_UNITY_SDK:
+ conn = UnitySystem(host=module_params['unispherehost'],
+ port=module_params['port'],
+ verify=module_params['validate_certs'],
+ username=module_params['username'],
+ password=module_params['password'],
+ application_type=application_type)
+ return conn
+
+
+def get_logger(module_name, log_file_name='ansible_unity.log',
+ log_devel=logging.INFO):
+    """Initializes and returns the logger object
+
+ :param module_name: Name of module to be part of log message
+ :param log_file_name: Name of file in which the log messages get appended
+ :param log_devel: Log level
+ """
+
+ FORMAT = '%(asctime)-15s %(filename)s %(levelname)s : %(message)s'
+ max_bytes = 5 * 1024 * 1024
+ logging.basicConfig(filename=log_file_name, format=FORMAT)
+ LOG = logging.getLogger(module_name)
+ LOG.setLevel(log_devel)
+ handler = CustomRotatingFileHandler(log_file_name,
+ maxBytes=max_bytes,
+ backupCount=5)
+ formatter = logging.Formatter(FORMAT)
+ handler.setFormatter(formatter)
+ LOG.addHandler(handler)
+ LOG.propagate = False
+ return LOG
+
+
+KB_IN_BYTES = 1024
+MB_IN_BYTES = 1024 * 1024
+GB_IN_BYTES = 1024 * 1024 * 1024
+TB_IN_BYTES = 1024 * 1024 * 1024 * 1024
+
+
+def get_size_bytes(size, cap_units):
+ """Convert the given size to bytes"""
+
+ if size is not None and size > 0:
+ if cap_units in ('kb', 'KB'):
+ return size * KB_IN_BYTES
+ elif cap_units in ('mb', 'MB'):
+ return size * MB_IN_BYTES
+ elif cap_units in ('gb', 'GB'):
+ return size * GB_IN_BYTES
+ elif cap_units in ('tb', 'TB'):
+ return size * TB_IN_BYTES
+ else:
+ return size
+ else:
+ return 0
+
+
+def convert_size_with_unit(size_bytes):
+    """Convert a size in bytes to a human-readable string with a unit such as KB, MB, GB, TB or PB."""
+
+ if not isinstance(size_bytes, int):
+        raise ValueError('This method takes an integer argument only')
+ if size_bytes == 0:
+ return "0B"
+ size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+ i = int(math.floor(math.log(size_bytes, 1024)))
+ p = math.pow(1024, i)
+ s = round(size_bytes / p, 2)
+ return "%s %s" % (s, size_name[i])
+
+
+def get_size_in_gb(size, cap_units):
+    """Convert the given size to size in GB, rounded to the nearest integer"""
+
+ size_in_bytes = get_size_bytes(size, cap_units)
+ size = Decimal(size_in_bytes / GB_IN_BYTES)
+ size_in_gb = round(size)
+ return size_in_gb
+
+
+def is_input_empty(item):
+ """Check whether input string is empty"""
+
+ if item == "" or item.isspace():
+ return True
+ else:
+ return False
+
+
+def is_size_negative(size):
+ """Check whether size is negative"""
+
+ if size and size < 0:
+ return True
+ else:
+ return False
+
+
+def has_special_char(value):
+    """Check whether the string contains any special character.
+    The '_' character is allowed."""
+
+ regex = re.compile(r'[@!#$%^&*()<>?/\|}{~:]')
+ if regex.search(value) is None:
+ return False
+ else:
+ return True
+
+
+def is_initiator_valid(value):
+ """Validate format of the FC or iSCSI initiator"""
+
+ if value.startswith('iqn') or re.match(r"([A-Fa-f0-9]{2}:){15}[A-Fa-f0-9]{2}", value, re.I) is not None:
+ return True
+ else:
+ return False
+
+
+def is_valid_netmask(netmask):
+    """Validates whether the given string is a valid subnet mask"""
+
+    if netmask:
+        # Anchored at both ends so that stray leading or trailing
+        # characters are rejected.
+        regexp = re.compile(r'^(((128|192|224|240|248|252|254)\.0\.0\.0)|'
+                            r'(255\.(((0|128|192|224|240|248|252|254)\.0\.0)|'
+                            r'(255\.(((0|128|192|224|240|248|252|254)\.0)|'
+                            r'255\.(0|128|192|224|240|248|252|254))))))$')
+        if not regexp.search(netmask):
+            return False
+    return True
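
A short sketch (editorial) exercising the pure helpers above. It assumes
ansible-core and this collection are importable; the storops SDK is not
required because the SDK imports in utils.py are guarded by try/except:

from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
    import utils

print(utils.get_size_bytes(2, 'GB'))             # 2147483648
print(utils.convert_size_with_unit(2147483648))  # 2.0 GB
print(utils.get_size_in_gb(2048, 'MB'))          # 2
print(utils.is_valid_netmask('255.255.255.0'))   # True
print(utils.has_special_char('ansible_vol_1'))   # False
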
diff --git a/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
new file mode 100644
index 000000000..d40c4f11d
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
@@ -0,0 +1,630 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing CIFS server on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: cifsserver
+version_added: '1.4.0'
+short_description: Manage CIFS server on Unity storage system
+description:
+- Managing the CIFS server on the Unity storage system includes creating CIFS server, getting CIFS server details
+ and deleting CIFS server.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server on which CIFS server will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which CIFS server will be hosted.
+ type: str
+ netbios_name:
+ description:
+    - The computer name of the SMB server in the Windows network.
+ type: str
+ workgroup:
+ description:
+ - Standalone SMB server workgroup.
+ type: str
+ local_password:
+ description:
+ - Standalone SMB server administrator password.
+ type: str
+ domain:
+ description:
+ - The domain name where the SMB server is registered in Active Directory.
+ type: str
+ domain_username:
+ description:
+ - Active Directory domain user name.
+ type: str
+ domain_password:
+ description:
+ - Active Directory domain password.
+ type: str
+ cifs_server_name:
+ description:
+ - The name of the CIFS server.
+ type: str
+ cifs_server_id:
+ description:
+ - The ID of the CIFS server.
+ type: str
+ interfaces:
+ description:
+ - List of file IP interfaces that service CIFS protocol of SMB server.
+ type: list
+ elements: str
+ unjoin_cifs_server_account:
+ description:
+ - Keep SMB server account unjoined in Active Directory after deletion.
+ - C(false) specifies keep SMB server account joined after deletion.
+ - C(true) specifies unjoin SMB server account from Active Directory before deletion.
+ type: bool
+ state:
+ description:
+ - Define whether the CIFS server should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create CIFS server belonging to Active Directory
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ cifs_server_name: "test_cifs"
+ domain: "ad_domain"
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "present"
+
+- name: Get CIFS server details using CIFS server ID
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ state: "present"
+
+- name: Get CIFS server details using NAS server name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+- name: Delete CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_37"
+ unjoin_cifs_server_account: True
+ domain_username: "domain_username"
+ domain_password: "domain_password"
+ state: "absent"
+
+- name: Create standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ workgroup: "ansible"
+ local_password: "Password123!"
+ nas_server_name: "test_nas1"
+ state: "present"
+
+- name: Get CIFS server details using netbios name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ netbios_name: "ANSIBLE_CIFS"
+ state: "present"
+
+- name: Delete standalone CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cifs_server_id: "cifs_40"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+cifs_server_details:
+ description: Details of the CIFS server.
+ returned: When CIFS server exists
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the CIFS server instance.
+ type: str
+ name:
+ description: User-specified name for the SMB server.
+ type: str
+ netbios_name:
+            description: Computer name of the SMB server in the Windows network.
+ type: str
+ description:
+ description: Description of the SMB server.
+ type: str
+ domain:
+ description: Domain name where SMB server is registered in Active Directory.
+ type: str
+ workgroup:
+ description: Windows network workgroup for the SMB server.
+ type: str
+ is_standalone:
+ description: Indicates whether the SMB server is standalone.
+ type: bool
+        nas_server:
+ description: Information about the NAS server in the storage system.
+ type: dict
+ contains:
+ UnityNasServer:
+ description: Information about the NAS server in the storage system.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the NAS server instance.
+ type: str
+ file_interfaces:
+ description: The file interfaces associated with the NAS server.
+ type: dict
+ contains:
+ UnityFileInterfaceList:
+ description: List of file interfaces associated with the NAS server.
+ type: list
+ contains:
+ UnityFileInterface:
+ description: Details of file interface associated with the NAS server.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the file interface.
+ type: str
+ smb_multi_channel_supported:
+ description: Indicates whether the SMB 3.0+ multichannel feature is supported.
+ type: bool
+ smb_protocol_versions:
+ description: Supported SMB protocols, such as 1.0, 2.0, 2.1, 3.0, and so on.
+ type: list
+ smbca_supported:
+ description: Indicates whether the SMB server supports continuous availability.
+ type: bool
+ sample: {
+ "description": null,
+ "domain": "xxx.xxx.xxx.com",
+ "existed": true,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": -9223363258905013637,
+ "id": "if_43"
+ }
+ }
+ ]
+ },
+ "hash": -9223363258905010379,
+ "health": {
+ "UnityHealth": {
+ "hash": 8777949765559
+ }
+ },
+ "id": "cifs_40",
+ "is_standalone": false,
+ "last_used_organizational_unit": "ou=Computers,ou=Dell NAS servers",
+ "name": "ansible_cifs",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8777949765531,
+ "id": "nas_18"
+ }
+ },
+ "netbios_name": "ANSIBLE_CIFS",
+ "smb_multi_channel_supported": true,
+ "smb_protocol_versions": [
+ "1.0",
+ "2.0",
+ "2.1",
+ "3.0"
+ ],
+ "smbca_supported": true,
+ "workgroup": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
+LOG = utils.get_logger('cifsserver')
+
+
+application_type = "Ansible/1.6.0"
+
+
+class CIFSServer(object):
+ """Class with CIFS server operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_cifs_server_parameters())
+
+ mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['cifs_server_id', 'cifs_server_name'],
+ ['cifs_server_id', 'netbios_name']]
+ required_one_of = [['cifs_server_id', 'cifs_server_name', 'netbios_name', 'nas_server_name', 'nas_server_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ LOG.info('Check Mode Flag %s', self.module.check_mode)
+
+ def get_details(self, cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None):
+ """Get CIFS server details.
+ :param: cifs_server_id: The ID of the CIFS server
+ :param: cifs_server_name: The name of the CIFS server
+        :param: netbios_name: Name of the SMB server in the Windows network
+ :param: nas_server_id: The ID of the NAS server
+ :return: Dict containing CIFS server details if exists
+ """
+
+ LOG.info("Getting CIFS server details")
+ id_or_name = get_id_name(cifs_server_id, cifs_server_name, netbios_name, nas_server_id)
+
+ try:
+ if cifs_server_id:
+ cifs_server_details = self.unity_conn.get_cifs_server(_id=cifs_server_id)
+ return process_response(cifs_server_details)
+
+ if cifs_server_name:
+ cifs_server_details = self.unity_conn.get_cifs_server(name=cifs_server_name)
+ return process_response(cifs_server_details)
+
+ if netbios_name:
+ cifs_server_details = self.unity_conn.get_cifs_server(netbios_name=netbios_name)
+ if len(cifs_server_details) > 0:
+ return process_dict(cifs_server_details._get_properties())
+
+ if nas_server_id:
+ cifs_server_details = self.unity_conn.get_cifs_server(nas_server=nas_server_id)
+ if len(cifs_server_details) > 0:
+ return process_dict(cifs_server_details._get_properties())
+ return None
+ except utils.HttpError as e:
+ if e.http_status == 401:
+                msg = "Failed to get CIFS server: %s due to incorrect " \
+                      "username/password. Error: %s" % (id_or_name, str(e))
+ else:
+ msg = "Failed to get CIFS server: %s with error: %s" % (id_or_name, str(e))
+ except utils.UnityResourceNotFoundError:
+ msg = "CIFS server with ID %s not found" % cifs_server_id
+ LOG.info(msg)
+ return None
+ except utils.StoropsConnectTimeoutError as e:
+ msg = "Failed to get CIFS server: %s with error: %s. Please check unispherehost IP: %s" % (
+ id_or_name, str(e), self.module.params['unispherehost'])
+ except Exception as e:
+ msg = "Failed to get details of CIFS server: %s with error: %s" % (id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_cifs_server_instance(self, cifs_server_id):
+ """Get CIFS server instance.
+ :param: cifs_server_id: The ID of the CIFS server
+ :return: Return CIFS server instance if exists
+ """
+
+ try:
+ cifs_server_obj = utils.UnityCifsServer.get(cli=self.unity_conn._cli, _id=cifs_server_id)
+ return cifs_server_obj
+
+ except Exception as e:
+ error_msg = "Failed to get the CIFS server %s instance" \
+ " with error %s" % (cifs_server_id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def delete_cifs_server(self, cifs_server_id, skip_unjoin=None, domain_username=None, domain_password=None):
+ """Delete CIFS server.
+ :param: cifs_server_id: The ID of the CIFS server
+ :param: skip_unjoin: Flag indicating whether to unjoin SMB server account from AD before deletion
+ :param: domain_username: The domain username
+ :param: domain_password: The domain password
+ :return: Return True if CIFS server is deleted
+ """
+
+ LOG.info("Deleting CIFS server")
+ try:
+ if not self.module.check_mode:
+ cifs_obj = self.get_cifs_server_instance(cifs_server_id=cifs_server_id)
+ cifs_obj.delete(skip_domain_unjoin=skip_unjoin, username=domain_username, password=domain_password)
+ return True
+
+ except Exception as e:
+ msg = "Failed to delete CIFS server: %s with error: %s" % (cifs_server_id, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nas_server_id(self, nas_server_name):
+ """Get NAS server ID.
+ :param: nas_server_name: The name of NAS server
+ :return: Return NAS server ID if exists
+ """
+
+ LOG.info("Getting NAS server ID")
+ try:
+ obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
+ return obj_nas.get_id()
+
+ except Exception as e:
+ msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def is_modify_interfaces(self, cifs_server_details):
+ """Check if modification is required in existing interfaces
+ :param: cifs_server_details: CIFS server details
+ :return: Flag indicating if modification is required
+ """
+
+ existing_interfaces = []
+ if cifs_server_details['file_interfaces']['UnityFileInterfaceList']:
+ for interface in cifs_server_details['file_interfaces']['UnityFileInterfaceList']:
+ existing_interfaces.append(interface['UnityFileInterface']['id'])
+
+ for interface in self.module.params['interfaces']:
+ if interface not in existing_interfaces:
+ return True
+ return False
+
+ def is_modification_required(self, cifs_server_details):
+ """Check if modification is required in existing CIFS server
+ :param: cifs_server_details: CIFS server details
+ :return: Flag indicating if modification is required
+ """
+
+ LOG.info("Checking if any modification is required")
+ param_list = ['netbios_name', 'workgroup']
+ for param in param_list:
+ if self.module.params[param] is not None and cifs_server_details[param] is not None and \
+ self.module.params[param].upper() != cifs_server_details[param]:
+ return True
+
+ # Check for domain
+ if self.module.params['domain'] is not None and cifs_server_details['domain'] is not None and \
+ self.module.params['domain'] != cifs_server_details['domain']:
+ return True
+
+ # Check file interfaces
+ if self.module.params['interfaces'] is not None:
+ return self.is_modify_interfaces(cifs_server_details)
+ return False
+
+ def create_cifs_server(self, nas_server_id, interfaces=None, netbios_name=None, cifs_server_name=None, domain=None,
+ domain_username=None, domain_password=None, workgroup=None, local_password=None):
+ """Create CIFS server.
+ :param: nas_server_id: The ID of NAS server
+ :param: interfaces: List of file interfaces
+        :param: netbios_name: Name of the SMB server in the Windows network
+ :param: cifs_server_name: Name of the CIFS server
+ :param: domain: The domain name where the SMB server is registered in Active Directory
+ :param: domain_username: The domain username
+ :param: domain_password: The domain password
+ :param: workgroup: Standalone SMB server workgroup
+ :param: local_password: Standalone SMB server admin password
+ :return: Return True if CIFS server is created
+ """
+
+ LOG.info("Creating CIFS server")
+ try:
+ if not self.module.check_mode:
+ utils.UnityCifsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, interfaces=interfaces,
+ netbios_name=netbios_name, name=cifs_server_name, domain=domain,
+ domain_username=domain_username, domain_password=domain_password,
+ workgroup=workgroup, local_password=local_password)
+ return True
+ except Exception as e:
+ msg = "Failed to create CIFS server with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_params(self):
+ """Validate the parameters
+ """
+
+ param_list = ['nas_server_id', 'nas_server_name', 'domain', 'cifs_server_id', 'cifs_server_name',
+ 'local_password', 'netbios_name', 'workgroup', 'domain_username', 'domain_password']
+
+ msg = "Please provide valid {0}"
+ for param in param_list:
+ if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0:
+ errmsg = msg.format(param)
+ self.module.fail_json(msg=errmsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on CIFS server module based on parameters
+ passed in the playbook
+ """
+ cifs_server_id = self.module.params['cifs_server_id']
+ cifs_server_name = self.module.params['cifs_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ nas_server_name = self.module.params['nas_server_name']
+ netbios_name = self.module.params['netbios_name']
+ workgroup = self.module.params['workgroup']
+ local_password = self.module.params['local_password']
+ domain = self.module.params['domain']
+ domain_username = self.module.params['domain_username']
+ domain_password = self.module.params['domain_password']
+ interfaces = self.module.params['interfaces']
+ unjoin_cifs_server_account = self.module.params['unjoin_cifs_server_account']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and CIFS server details
+ result = dict(
+ changed=False,
+ cifs_server_details={}
+ )
+
+ # Validate the parameters
+ self.validate_params()
+
+ if nas_server_name is not None:
+ nas_server_id = self.get_nas_server_id(nas_server_name)
+
+ cifs_server_details = self.get_details(cifs_server_id=cifs_server_id,
+ cifs_server_name=cifs_server_name,
+ netbios_name=netbios_name,
+ nas_server_id=nas_server_id)
+
+ # Check if modification is required
+ if cifs_server_details:
+ if cifs_server_id is None:
+ cifs_server_id = cifs_server_details['id']
+ modify_flag = self.is_modification_required(cifs_server_details)
+ if modify_flag:
+                self.module.fail_json(msg="Modification is not supported through the Ansible module")
+
+ if not cifs_server_details and state == 'present':
+ if not nas_server_id:
+                self.module.fail_json(msg="Please provide NAS server ID/name to create a CIFS server.")
+
+ if any([netbios_name, workgroup, local_password]) and not all([netbios_name, workgroup, local_password]):
+ msg = "netbios_name, workgroup and local_password " \
+ "are required to create standalone CIFS server."
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ result['changed'] = self.create_cifs_server(nas_server_id, interfaces, netbios_name,
+ cifs_server_name, domain, domain_username, domain_password,
+ workgroup, local_password)
+
+ if state == 'absent' and cifs_server_details:
+ skip_unjoin = None
+ if unjoin_cifs_server_account is not None:
+ skip_unjoin = not unjoin_cifs_server_account
+ result['changed'] = self.delete_cifs_server(cifs_server_id, skip_unjoin, domain_username,
+ domain_password)
+
+ if state == 'present':
+ result['cifs_server_details'] = self.get_details(cifs_server_id=cifs_server_id,
+ cifs_server_name=cifs_server_name,
+ netbios_name=netbios_name,
+ nas_server_id=nas_server_id)
+ LOG.info("Process Dict: %s", result['cifs_server_details'])
+ self.module.exit_json(**result)
+
+
+def get_id_name(cifs_server_id=None, cifs_server_name=None, netbios_name=None, nas_server_id=None):
+ """Get the id_or_name.
+ :param: cifs_server_id: The ID of CIFS server
+ :param: cifs_server_name: The name of CIFS server
+    :param: netbios_name: Name of the SMB server in the Windows network
+ :param: nas_server_id: The ID of NAS server
+ :return: Return id_or_name
+ """
+ if cifs_server_id:
+ id_or_name = cifs_server_id
+ elif cifs_server_name:
+ id_or_name = cifs_server_name
+ elif netbios_name:
+ id_or_name = netbios_name
+ else:
+ id_or_name = nas_server_id
+ return id_or_name
+
+
+def process_response(cifs_server_details):
+ """Process CIFS server details.
+ :param: cifs_server_details: Dict containing CIFS server details
+ :return: Processed dict containing CIFS server details
+ """
+ if cifs_server_details.existed:
+ return cifs_server_details._get_properties()
+
+
+def process_dict(cifs_server_details):
+ """Process CIFS server details.
+ :param: cifs_server_details: Dict containing CIFS server details
+ :return: Processed dict containing CIFS server details
+ """
+    param_list = ['description', 'domain', 'file_interfaces', 'health', 'id', 'is_standalone', 'name',
+                  'nas_server', 'netbios_name', 'smb_multi_channel_supported', 'smb_protocol_versions',
+                  'smbca_supported', 'workgroup']
+
+ for param in param_list:
+ if param in cifs_server_details:
+ cifs_server_details[param] = cifs_server_details[param][0]
+ return cifs_server_details
+
+
+def get_cifs_server_parameters():
+    """This method provides the parameters required for the Ansible
+    CIFS server module on Unity"""
+ return dict(
+ cifs_server_id=dict(), cifs_server_name=dict(),
+ netbios_name=dict(), workgroup=dict(),
+ local_password=dict(no_log=True), domain=dict(),
+ domain_username=dict(), domain_password=dict(no_log=True),
+ nas_server_name=dict(), nas_server_id=dict(),
+ interfaces=dict(type='list', elements='str'),
+ unjoin_cifs_server_account=dict(type='bool'),
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ )
+
+
+def main():
+ """Create Unity CIFS server object and perform action on it
+ based on user input from playbook"""
+ obj = CIFSServer()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
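
The standalone-server branch of perform_module_operation() accepts either
none or all of netbios_name, workgroup and local_password. A tiny editorial
sketch of that any()/all() idiom; the helper name is hypothetical and not
part of the module:

def only_subset_provided(*values):
    """Return True when some, but not all, of the values are provided."""
    return any(values) and not all(values)

# Mirrors the standalone CIFS server validation above.
print(only_subset_provided('ANSIBLE_CIFS', 'ansible', 'Password123!'))  # False
print(only_subset_provided('ANSIBLE_CIFS', None, None))                 # True
print(only_subset_provided(None, None, None))                           # False
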
diff --git a/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
new file mode 100644
index 000000000..14e4de506
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing consistency group on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: consistencygroup
+version_added: '1.1.0'
+short_description: Manage consistency groups on Unity storage system
+description:
+- Managing the consistency group on the Unity storage system includes
+ creating new consistency group, adding volumes to consistency
+ group, removing volumes from consistency group, mapping hosts to
+ consistency group, unmapping hosts from consistency group,
+ renaming consistency group, modifying attributes of consistency group,
+ enabling replication in consistency group, disabling replication in
+ consistency group and deleting consistency group.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ cg_name:
+ description:
+ - The name of the consistency group.
+ - It is mandatory for the create operation.
+ - Specify either I(cg_name) or I(cg_id) (but not both) for any operation.
+ type: str
+ cg_id:
+ description:
+ - The ID of the consistency group.
+ - It can be used only for get, modify, add/remove volumes, or delete
+ operations.
+ type: str
+ volumes:
+ description:
+ - This is a list of volumes.
+ - Either the volume ID or name must be provided for adding/removing
+ existing volumes from consistency group.
+ - If I(volumes) are given, then I(vol_state) should also be specified.
+ - Volumes cannot be added/removed from consistency group, if the
+ consistency group or the volume has snapshots.
+ type: list
+ elements: dict
+ suboptions:
+ vol_id:
+ description:
+ - The ID of the volume.
+ type: str
+ vol_name:
+ description:
+ - The name of the volume.
+ type: str
+ vol_state:
+ description:
+ - String variable, describes the state of volumes inside consistency
+ group.
+ - If I(volumes) are given, then I(vol_state) should also be specified.
+ choices: [present-in-group , absent-in-group]
+ type: str
+ new_cg_name:
+ description:
+ - The new name of the consistency group, used in rename operation.
+ type: str
+ description:
+ description:
+ - Description of the consistency group.
+ type: str
+ snap_schedule:
+ description:
+ - Snapshot schedule assigned to the consistency group.
+ - Specifying an empty string "" removes the existing snapshot schedule
+ from consistency group.
+ type: str
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ hosts:
+ description:
+ - This is a list of hosts.
+ - Either the host ID or name must be provided for mapping/unmapping
+ hosts for a consistency group.
+ - If I(hosts) are given, then I(mapping_state) should also be specified.
+ - Hosts cannot be mapped to a consistency group, if the
+ consistency group has no volumes.
+ - When a consistency group is being mapped to the host,
+ users should not use the volume module to map the volumes
+ in the consistency group to hosts.
+ type: list
+ elements: dict
+ suboptions:
+ host_id:
+ description:
+ - The ID of the host.
+ type: str
+ host_name:
+ description:
+ - The name of the host.
+ type: str
+ mapping_state:
+ description:
+ - String variable, describes the state of hosts inside the consistency
+ group.
+ - If I(hosts) are given, then I(mapping_state) should also be specified.
+ choices: [mapped , unmapped]
+ type: str
+ replication_params:
+ description:
+ - Settings required for enabling replication.
+ type: dict
+ suboptions:
+ destination_cg_name:
+ description:
+ - Name of the destination consistency group.
+ - Default value will be source consistency group name prefixed by 'DR_'.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ type: str
+ required: true
+ choices: ['asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination LUNs.
+ - Option I(rpo) should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ default: local
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+            - IP or FQDN of the remote Unity Unisphere host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+            - User name of the remote Unity Unisphere host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+            - Password of the remote Unity Unisphere host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+              certificate of the remote Unity Unisphere host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+            - Port at which the remote Unity Unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_name:
+ description:
+        - Name of pool to allocate destination LUNs.
+ - Mutually exclusive with I(destination_pool_id).
+ type: str
+ destination_pool_id:
+ description:
+        - ID of pool to allocate destination LUNs.
+ - Mutually exclusive with I(destination_pool_name).
+ type: str
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+ state:
+ description:
+ - Define whether the consistency group should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+ - The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ cg_name: "{{cg_name}}"
+ description: "{{description}}"
+ snap_schedule: "{{snap_schedule1}}"
+ state: "present"
+
+- name: Get details of consistency group using id
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ state: "present"
+
+- name: Add volumes to consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_present}}"
+ state: "present"
+
+- name: Rename consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{cg_name}}"
+ new_cg_name: "{{new_cg_name}}"
+ state: "present"
+
+- name: Modify consistency group details
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ snap_schedule: "{{snap_schedule2}}"
+ tiering_policy: "{{tiering_policy1}}"
+ state: "present"
+
+- name: Map hosts to a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ - host_id: "Host_511"
+ mapping_state: "mapped"
+ state: "present"
+
+- name: Unmap hosts from a consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "{{cg_id}}"
+ hosts:
+ - host_id: "Host_511"
+ - host_name: "10.226.198.248"
+ mapping_state: "unmapped"
+ state: "present"
+
+- name: Remove volumes from consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ volumes:
+ - vol_name: "Ansible_Test-3"
+ - vol_id: "sv_1744"
+ vol_state: "{{vol_state_absent}}"
+ state: "present"
+
+- name: Delete consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "{{new_cg_name}}"
+ state: "absent"
+
+- name: Enable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_id: "cg_id_1"
+ replication_params:
+ destination_cg_name: "destination_cg_1"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+- name: Disable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ cg_name: "dis_repl_ans_source"
+ replication_state: "disable"
+ state: "present"
+"""
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+consistency_group_details:
+ description: Details of the consistency group.
+ returned: When consistency group exists
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the consistency group.
+ type: str
+ relocation_policy:
+ description: FAST VP tiering policy for the consistency group.
+ type: str
+ cg_replication_enabled:
+            description: Whether or not replication is enabled.
+ type: bool
+ snap_schedule:
+ description: Snapshot schedule applied to consistency group.
+ type: dict
+ contains:
+ UnitySnapSchedule:
+ description: Snapshot schedule applied to consistency
+ group.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the
+ snapshot schedule.
+ type: str
+ name:
+ description: The name of the snapshot schedule.
+ type: str
+ luns:
+ description: Details of volumes part of consistency group.
+ type: dict
+ contains:
+ UnityLunList:
+ description: List of volumes part of consistency group.
+ type: list
+ contains:
+ UnityLun:
+ description: Detail of volume.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to volume.
+ type: str
+ name:
+ description: The name of the volume.
+ type: str
+ snapshots:
+ description: List of snapshots of consistency group.
+ type: list
+ contains:
+ name:
+ description: Name of the snapshot.
+ type: str
+ creation_time:
+ description: Date and time on which the snapshot was taken.
+ type: str
+ expirationTime:
+ description: Date and time after which the snapshot will expire.
+ type: str
+ storageResource:
+ description: Storage resource for which the snapshot was
+ taken.
+ type: dict
+ contains:
+ UnityStorageResource:
+ description: Details of the storage resource.
+ type: dict
+ contains:
+ id:
+ description: The id of the storage
+ resource.
+ type: str
+ block_host_access:
+ description: Details of hosts mapped to the consistency group.
+ type: dict
+ contains:
+ UnityBlockHostAccessList:
+ description: List of hosts mapped to consistency group.
+ type: list
+ contains:
+ UnityBlockHostAccess:
+ description: Details of host.
+ type: dict
+ contains:
+ id:
+ description: The ID of the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ sample: {
+ "advanced_dedup_status": "DedupStatusEnum.DISABLED",
+ "block_host_access": null,
+ "cg_replication_enabled": false,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "data_reduction_status": "DataReductionStatusEnum.DISABLED",
+ "datastores": null,
+ "dedup_status": null,
+ "description": "Ansible testing",
+ "esx_filesystem_block_size": null,
+ "esx_filesystem_major_version": null,
+ "existed": true,
+ "filesystem": null,
+ "hash": 8776023812033,
+ "health": {
+ "UnityHealth": {
+ "hash": 8776023811889
+ }
+ },
+ "host_v_vol_datastore": null,
+ "id": "res_7477",
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": null,
+ "luns": null,
+ "metadata_size": 0,
+ "metadata_size_allocated": 0,
+ "name": "Ansible_CG_Testing",
+ "per_tier_size_used": null,
+ "pools": null,
+ "relocation_policy": "TieringPolicyEnum.MIXED",
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 0,
+ "size_total": 0,
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snaps_size_allocated": 0,
+ "snaps_size_total": 0,
+ "snapshots": [],
+ "thin_status": "ThinStatusEnum.FALSE",
+ "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP",
+ "virtual_volumes": null,
+ "vmware_uuid": null
+ }
+'''
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('consistencygroup',
+ log_devel=logging.INFO)
+
+application_type = "Ansible/1.6.0"
+
+
+class ConsistencyGroup(object):
+ """Class with consistency group operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_consistencygroup_parameters())
+
+ mutually_exclusive = [['cg_name', 'cg_id']]
+ required_one_of = [['cg_name', 'cg_id']]
+ required_together = [['volumes', 'vol_state'], ['hosts', 'mapping_state']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ required_together=required_together
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def return_cg_instance(self, cg_name):
+ """Return the consistency group instance.
+ :param cg_name: The name of the consistency group
+ :return: Instance of the consistency group
+ """
+
+ try:
+ cg_details = self.unity_conn.get_cg(name=cg_name)
+ cg_id = cg_details.get_id()
+ cg_obj = utils.cg.UnityConsistencyGroup.get(self.unity_conn._cli,
+ cg_id)
+ return cg_obj
+
+ except Exception as e:
+ msg = "Failed to get the consistency group {0} instance with " \
+ "error {1}".format(cg_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_details(self, cg_id=None, cg_name=None):
+ """Get consistency group details.
+ :param cg_id: The id of the consistency group
+ :param cg_name: The name of the consistency group
+ :return: Dict containing consistency group details if exists
+ """
+
+ id_or_name = cg_id if cg_id else cg_name
+ errormsg = "Failed to get details of consistency group {0} with" \
+ " error {1}"
+
+ try:
+ cg_details = self.unity_conn.get_cg(_id=cg_id, name=cg_name)
+ if cg_name is None:
+ cg_name = cg_details.name
+
+ if cg_details.existed:
+ cg_obj = self.return_cg_instance(cg_name)
+ snapshots = cg_obj.snapshots
+
+ snapshot_list = [snap._get_properties() for snap in snapshots]
+
+ cg_ret_details = cg_details._get_properties()
+
+ # Append details of host mapped to the consistency group
+ # in return response
+ if cg_ret_details['block_host_access']:
+ for i in range(len(cg_details.block_host_access)):
+ cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][
+ 'id'] = cg_details.block_host_access[i].host.id
+ cg_ret_details['block_host_access']['UnityBlockHostAccessList'][i]['UnityBlockHostAccess'][
+ 'name'] = cg_details.block_host_access[i].host.name
+ cg_ret_details['snapshots'] = snapshot_list
+
+ # Add volume name to the dict
+ if cg_ret_details['luns'] is not None:
+ for i in range(len(cg_details.luns)):
+ cg_ret_details['luns']['UnityLunList'][i]['UnityLun'][
+ 'name'] = cg_details.luns[i].name
+
+ # Add snapshot schedule name to the dict
+ if cg_ret_details['snap_schedule'] is not None:
+ cg_ret_details['snap_schedule']['UnitySnapSchedule'][
+ 'name'] = cg_details.snap_schedule.name
+
+ # Status of cg replication
+                cg_ret_details['cg_replication_enabled'] = bool(cg_details.check_cg_is_replicated())
+
+ return cg_ret_details
+ else:
+ LOG.info("Failed to get details of consistency group %s",
+ id_or_name)
+ return None
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ auth_err = "Incorrect username or password, {0}".format(
+ e.message)
+ msg = errormsg.format(id_or_name, auth_err)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ return None
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_host_id_by_name(self, host_name):
+ """ Get host ID by host name
+ :param host_name: str
+ :return: unity host ID
+ :rtype: str
+ """
+ try:
+ host_obj = self.unity_conn.get_host(name=host_name)
+ if host_obj and host_obj.existed:
+ return host_obj.id
+ else:
+                msg = "Host name: %s does not exist" % host_name
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to get host ID by name: %s error: %s" % (
+ host_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_volume_details(self, vol_name=None, vol_id=None):
+ """Get the details of a volume.
+ :param vol_name: The name of the volume
+ :param vol_id: The id of the volume
+ :return: Dict containing volume details if exists
+ """
+
+ id_or_name = vol_id if vol_id else vol_name
+
+ try:
+ lun = self.unity_conn.get_lun(name=vol_name, _id=vol_id)
+
+ cg = None
+ if lun.existed:
+ lunid = lun.get_id()
+ unitylun = utils.UnityLun.get(self.unity_conn._cli, lunid)
+ if unitylun.cg is not None:
+ cg = unitylun.cg
+ else:
+                errormsg = "The volume {0} was not found.".format(id_or_name)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ cg_details = self.get_details(
+ cg_id=self.module.params['cg_id'],
+ cg_name=self.module.params['cg_name'])
+
+ # Check if volume is already part of another consistency group
+ if cg is None:
+ return lun._get_properties()['id']
+
+ errormsg = "The volume {0} is already part of consistency group" \
+ " {1}".format(id_or_name, cg.name)
+
+ if cg_details is None:
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if cg.id != cg_details['id']:
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ return lun._get_properties()['id']
+
+ except Exception as e:
+ msg = "Failed to get the volume {0} with error {1}".format(
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def remove_volumes_from_cg(self, cg_name, volumes):
+ """Remove volumes from consistency group.
+ :param cg_name: The name of the consistency group
+ :param volumes: The list of volumes to be removed
+ :return: Boolean value to indicate if volumes are removed from
+ consistency group
+ """
+
+ cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties()
+ existing_volumes_in_cg = cg_details['luns']
+ existing_vol_ids = []
+
+ if existing_volumes_in_cg:
+ existing_vol_ids = [vol['UnityLun']['id'] for vol in
+ existing_volumes_in_cg['UnityLunList']]
+
+ ids_to_remove = []
+ vol_name_list = []
+ vol_id_list = []
+
+ for vol in volumes:
+ if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list):
+ vol_id_list.append(vol['vol_id'])
+ elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list):
+ vol_name_list.append(vol['vol_name'])
+
+ """remove volume by name"""
+ for vol in vol_name_list:
+ ids_to_remove.append(self.get_volume_details(vol_name=vol))
+
+ vol_id_list = list(set(vol_id_list + ids_to_remove))
+ ids_to_remove = list(set(existing_vol_ids).intersection(set(vol_id_list)))
+
+ LOG.info("Volume IDs to remove %s", ids_to_remove)
+
+ if len(ids_to_remove) == 0:
+ return False
+
+ vol_remove_list = []
+ for vol in ids_to_remove:
+ vol_dict = {"id": vol}
+ vol_remove_list.append(vol_dict)
+
+ cg_obj = self.return_cg_instance(cg_name)
+
+ try:
+ cg_obj.modify(lun_remove=vol_remove_list)
+ return True
+ except Exception as e:
+ errormsg = "Remove existing volumes from consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def add_volumes_to_cg(self, cg_name, volumes, tiering_policy):
+ """Add volumes to consistency group.
+ :param cg_name: The name of the consistency group
+ :param volumes: The list of volumes to be added to consistency
+ group
+ :param tiering_policy: The tiering policy that is to be applied to
+ consistency group
+ :return: The boolean value to indicate if volumes are added to
+ consistency group
+ """
+
+ cg_details = self.unity_conn.get_cg(name=cg_name)._get_properties()
+ existing_volumes_in_cg = cg_details['luns']
+ existing_vol_ids = []
+
+ if existing_volumes_in_cg:
+ existing_vol_ids = [vol['UnityLun']['id'] for vol in
+ existing_volumes_in_cg['UnityLunList']]
+
+ ids_to_add = []
+ vol_name_list = []
+ vol_id_list = []
+ all_vol_ids = []
+
+ for vol in volumes:
+ if 'vol_id' in vol and not (vol['vol_id'] in vol_id_list):
+ vol_id_list.append(vol['vol_id'])
+ elif 'vol_name' in vol and not (vol['vol_name'] in vol_name_list):
+ vol_name_list.append(vol['vol_name'])
+
+ """add volume by name"""
+ for vol in vol_name_list:
+ ids_to_add.append(self.get_volume_details(vol_name=vol))
+
+ """add volume by id"""
+ for vol in vol_id_list:
+ """verifying if volume id exists in array"""
+ ids_to_add.append(self.get_volume_details(vol_id=vol))
+
+ all_vol_ids = ids_to_add + existing_vol_ids
+ ids_to_add = list(set(all_vol_ids) - set(existing_vol_ids))
+
+ LOG.info("Volume IDs to add %s", ids_to_add)
+
+ if len(ids_to_add) == 0:
+ return False
+
+ vol_add_list = []
+ for vol in ids_to_add:
+ vol_dict = {"id": vol}
+ vol_add_list.append(vol_dict)
+
+ cg_obj = self.return_cg_instance(cg_name)
+
+ policy_enum = None
+ if tiering_policy:
+ if utils.TieringPolicyEnum[tiering_policy]:
+ policy_enum = utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ try:
+ cg_obj.modify(lun_add=vol_add_list, tiering_policy=policy_enum)
+ return True
+ except Exception as e:
+ errormsg = "Add existing volumes to consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def map_hosts_to_cg(self, cg_name, add_hosts):
+ """Map hosts to consistency group.
+ :param cg_name: The name of the consistency group
+ :param add_hosts: List of hosts that are to be mapped to cg
+ :return: Boolean value to indicate if hosts were mapped to cg
+ """
+ cg_details = self.unity_conn.get_cg(name=cg_name)
+ existing_volumes_in_cg = cg_details.luns
+
+ existing_hosts_in_cg = cg_details.block_host_access
+ existing_host_ids = []
+
+ """Get list of existing hosts in consistency group"""
+ if existing_hosts_in_cg:
+ for i in range(len(existing_hosts_in_cg)):
+ existing_host_ids.append(existing_hosts_in_cg[i].host.id)
+
+ host_id_list = []
+ host_name_list = []
+ add_hosts_id = []
+ host_add_list = []
+ all_hosts = []
+
+ for host in add_hosts:
+ if 'host_id' in host and not (host['host_id'] in host_id_list):
+ host_id_list.append(host['host_id'])
+ elif 'host_name' in host and not (host['host_name'] in host_name_list):
+ host_name_list.append(host['host_name'])
+
+ """add hosts by name"""
+ for host_name in host_name_list:
+ add_hosts_id.append(self.get_host_id_by_name(host_name))
+
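+ # keep only the hosts that are not already mapped to the consistency group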
+ all_hosts = host_id_list + existing_host_ids + add_hosts_id
+ add_hosts_id = list(set(all_hosts) - set(existing_host_ids))
+
+ if len(add_hosts_id) == 0:
+ return False
+
+ if existing_volumes_in_cg:
+
+ for host_id in add_hosts_id:
+ host_dict = {"id": host_id}
+ host_add_list.append(host_dict)
+
+ LOG.info("List of hosts to be added to consistency group "
+ "%s ", host_add_list)
+ cg_obj = self.return_cg_instance(cg_name)
+ try:
+ cg_obj.modify(name=cg_name, host_add=host_add_list)
+ return True
+ except Exception as e:
+ errormsg = "Adding host to consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def unmap_hosts_to_cg(self, cg_name, remove_hosts):
+ """Unmap hosts to consistency group.
+ :param cg_name: The name of the consistency group
+ :param remove_hosts: List of hosts that are to be unmapped from cg
+ :return: Boolean value to indicate if hosts were mapped to cg
+ """
+ cg_details = self.unity_conn.get_cg(name=cg_name)
+ existing_hosts_in_cg = cg_details.block_host_access
+ existing_host_ids = []
+
+ """Get host ids existing in consistency group"""
+ if existing_hosts_in_cg:
+ for i in range(len(existing_hosts_in_cg)):
+ existing_host_ids.append(existing_hosts_in_cg[i].host.id)
+
+ host_remove_list = []
+ host_id_list = []
+ host_name_list = []
+ remove_hosts_id = []
+
+ for host in remove_hosts:
+ if 'host_id' in host and host['host_id'] not in host_id_list:
+ host_id_list.append(host['host_id'])
+ elif 'host_name' in host and host['host_name'] not in host_name_list:
+ host_name_list.append(host['host_name'])
+
+ """remove hosts by name"""
+ for host in host_name_list:
+ remove_hosts_id.append(self.get_host_id_by_name(host))
+
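+ # only hosts that are both requested for removal and currently mapped can be unmapped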
+ host_id_list = list(set(host_id_list + remove_hosts_id))
+ remove_hosts_id = list(set(existing_host_ids).intersection(set(host_id_list)))
+
+ if len(remove_hosts_id) == 0:
+ return False
+
+ for host in remove_hosts_id:
+ host_dict = {"id": host}
+ host_remove_list.append(host_dict)
+ cg_obj = self.return_cg_instance(cg_name)
+ try:
+ cg_obj.modify(name=cg_name, host_remove=host_remove_list)
+ return True
+ except Exception as e:
+ errormsg = "Removing host from consistency group {0} " \
+ "failed with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def rename_cg(self, cg_name, new_cg_name):
+ """Rename consistency group.
+ :param cg_name: The name of the consistency group
+ :param new_cg_name: The new name of the consistency group
+ :return: Boolean value to indicate if consistency group renamed
+ """
+ cg_obj = self.return_cg_instance(cg_name)
+
+ try:
+ cg_obj.modify(name=new_cg_name)
+ return True
+ except Exception as e:
+ errormsg = "Rename operation of consistency group {0} failed " \
+ "with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def is_cg_modified(self, cg_details):
+ """Check if the desired consistency group state is different from
+ existing consistency group.
+ :param cg_details: The dict containing consistency group details
+ :return: Boolean value to indicate if modification is needed
+ """
+ modified = False
+
+ if self.module.params['tiering_policy'] and cg_details['luns'] is \
+ None and self.module.params['volumes'] is None:
+ self.module.fail_json(msg="The system cannot assign a tiering"
+ " policy to an empty consistency group."
+ )
+
+ if self.module.params['hosts'] and cg_details['luns'] is \
+ None and self.module.params['volumes'] is None:
+ self.module.fail_json(msg="The system cannot assign hosts"
+ " to an empty consistency group.")
+
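+ # modification is needed when the description or the snapshot schedule differs from the playbook input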
+ if ((cg_details['description'] is not None and
+ self.module.params['description'] is not None and
+ cg_details['description'] != self.module.params['description'])
+ or (cg_details['description'] is None and
+ self.module.params['description'] is not None)) or \
+ ((cg_details['snap_schedule'] is not None and
+ self.module.params['snap_schedule'] is not None and
+ cg_details['snap_schedule']['UnitySnapSchedule']['name'] !=
+ self.module.params['snap_schedule']) or
+ (cg_details['snap_schedule'] is None and
+ self.module.params['snap_schedule'])):
+ modified = True
+
+ if cg_details['relocation_policy']:
+ tier_policy = cg_details['relocation_policy'].split('.')
+ if self.module.params['tiering_policy'] is not None and \
+ tier_policy[1] != self.module.params['tiering_policy']:
+ modified = True
+
+ return modified
+
+ def create_cg(self, cg_name, description, snap_schedule):
+ """Create a consistency group.
+ :param cg_name: The name of the consistency group
+ :param description: The description of the consistency group
+ :param snap_schedule: The name of the snapshot schedule
+ :return: The boolean value to indicate if consistency group
+ created and also returns the CG object
+ """
+
+ try:
+ if snap_schedule is not None:
+ snap_schedule = {"name": snap_schedule}
+
+ cg_obj = utils.cg.UnityConsistencyGroup.create(
+ self.unity_conn._cli, name=cg_name, description=description,
+ snap_schedule=snap_schedule)
+ return True, cg_obj
+ except Exception as e:
+ errormsg = "Create operation of consistency group {0} failed" \
+ " with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_cg(self, cg_name, description, snap_schedule, tiering_policy):
+ """Modify consistency group.
+ :param cg_name: The name of the consistency group
+ :param description: The description of the consistency group
+ :param snap_schedule: The name of the snapshot schedule
+ :param tiering_policy: The tiering policy that is to be applied to
+ consistency group
+ :return: The boolean value to indicate if consistency group
+ modified
+ """
+ cg_obj = self.return_cg_instance(cg_name)
+ is_snap_schedule_paused = None
+
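+ # an empty snap_schedule string detaches the existing schedule and resumes (unpauses) scheduling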
+ if self.module.params['snap_schedule'] == "":
+ is_snap_schedule_paused = False
+
+ if snap_schedule is not None:
+ if snap_schedule == "":
+ snap_schedule = {"name": None}
+ else:
+ snap_schedule = {"name": snap_schedule}
+
+ policy_enum = None
+ if tiering_policy:
+ if tiering_policy in utils.TieringPolicyEnum.__members__:
+ policy_enum = utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ try:
+ cg_obj.modify(description=description,
+ snap_schedule=snap_schedule,
+ tiering_policy=policy_enum,
+ is_snap_schedule_paused=is_snap_schedule_paused)
+ return True
+
+ except Exception as e:
+ errormsg = "Modify operation of consistency group {0} failed " \
+ "with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_cg(self, cg_name):
+ """Delete consistency group.
+ :param cg_name: The name of the consistency group
+ :return: The boolean value to indicate if consistency group deleted
+ """
+ cg_obj = self.return_cg_instance(cg_name)
+
+ try:
+ cg_obj.delete()
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of consistency group {0} failed " \
+ "with error {1}".format(cg_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def refine_volumes(self, volumes):
+ """Refine volumes.
+ :param volumes: Volumes that are to be added/removed
+ :return: List of volumes with each volume being identified with either
+ vol_id or vol_name
+ """
+ for vol in volumes:
+ if vol['vol_id'] is not None and vol['vol_name'] is None:
+ del vol['vol_name']
+ elif vol['vol_name'] is not None and vol['vol_id'] is None:
+ del vol['vol_id']
+ return volumes
+
+ def refine_hosts(self, hosts):
+ """Refine hosts.
+ :param hosts: Hosts that are to be mapped/unmapped
+ :return: List of hosts with each host being identified with either
+ host_id or host_name
+ """
+ for host in hosts:
+ if host['host_id'] is not None and host['host_name'] is None:
+ del host['host_name']
+ elif host['host_name'] is not None and host['host_id'] is None:
+ del host['host_id']
+ return hosts
+
+ def validate_volumes(self, volumes):
+ """Validate the volumes.
+ :param volumes: List of volumes
+ """
+
+ for vol in volumes:
+ if ('vol_id' in vol) and ('vol_name' in vol):
+ errormsg = "Both name and id are found for volume {0}. No" \
+ " action would be taken. Please specify either" \
+ " name or id.".format(vol)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'vol_id' in vol and (len(vol['vol_id'].strip()) == 0):
+ errormsg = "vol_id is blank. Please specify valid vol_id."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'vol_name' in vol and (len(vol.get('vol_name').strip()) == 0):
+ errormsg = "vol_name is blank. Please specify valid vol_name."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'vol_name' in vol:
+ self.get_volume_details(vol_name=vol['vol_name'])
+ elif 'vol_id' in vol:
+ self.get_volume_details(vol_id=vol['vol_id'])
+ else:
+ errormsg = "Expected either vol_name or vol_id, found" \
+ " neither for volume {0}".format(vol)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_hosts(self, hosts):
+ """Validate hosts.
+ :param hosts: List of hosts
+ """
+
+ for host in hosts:
+ if ('host_id' in host) and ('host_name' in host):
+ errormsg = "Both name and id are found for host {0}. No" \
+ " action would be taken. Please specify either" \
+ " name or id.".format(host)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'host_id' in host and (len(host['host_id'].strip()) == 0):
+ errormsg = "host_id is blank. Please specify valid host_id."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'host_name' in host and (len(host.get('host_name').strip()) == 0):
+ errormsg = "host_name is blank. Please specify valid host_name."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ elif 'host_name' in host:
+ self.get_host_id_by_name(host_name=host['host_name'])
+ elif 'host_id' in host:
+ host_obj = self.unity_conn.get_host(_id=host['host_id'])
+ if host_obj is None or host_obj.existed is False:
+ msg = "Host id: %s does not exists" % host['host_id']
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ else:
+ errormsg = "Expected either host_name or host_id, found" \
+ " neither for host {0}".format(host)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def update_replication_params(self, replication):
+ ''' Update replication params '''
+
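+ # for remote replication, open a second Unisphere connection so the destination pool can be resolved on the remote system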
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ connection_params = {
+ 'unispherehost': replication['remote_system']['remote_system_host'],
+ 'username': replication['remote_system']['remote_system_username'],
+ 'password': replication['remote_system']['remote_system_password'],
+ 'validate_certs': replication['remote_system']['remote_system_verifycert'],
+ 'port': replication['remote_system']['remote_system_port']
+ }
+ remote_system_conn = utils.get_unity_unisphere_connection(
+ connection_params, application_type)
+ replication['remote_system_name'] = remote_system_conn.name
+ if replication['destination_pool_name'] is not None:
+ pool_object = remote_system_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+ else:
+ if replication['destination_pool_name'] is not None:
+ pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+
+ def get_destination_cg_luns(self, source_lun_list):
+ ''' Form destination cg lun list '''
+ destination_cg_lun_list = []
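+ # each destination LUN mirrors its source LUN's provisioning attributes and gets a "DR_" name prefix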
+ if source_lun_list is not None:
+ for source_lun in source_lun_list:
+ destination_cg_lun_info = utils.UnityStorageResource()
+ destination_cg_lun_info.name = "DR_" + source_lun.name
+ destination_cg_lun_info.is_thin_enabled = source_lun.is_thin_enabled
+ destination_cg_lun_info.size_total = source_lun.size_total
+ destination_cg_lun_info.id = source_lun.id
+ destination_cg_lun_info.is_data_reduction_enabled = source_lun.is_data_reduction_enabled
+ destination_cg_lun_list.append(destination_cg_lun_info)
+ return destination_cg_lun_list
+
+ def enable_cg_replication(self, cg_name, replication):
+ ''' Add replication to the consistency group '''
+ try:
+ # Validate replication params
+ self.validate_cg_replication_params(replication)
+
+ # Get cg instance
+ cg_object = self.return_cg_instance(cg_name)
+
+ # Check if replication is enabled for cg
+ if cg_object.check_cg_is_replicated():
+ return False
+
+ # Update replication params
+ self.update_replication_params(replication)
+
+ # Get destination pool id
+ replication_args_list = {
+ 'dst_pool_id': replication['destination_pool_id']
+ }
+
+ # Get replication mode
+ if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous':
+ replication_args_list['max_time_out_of_sync'] = replication['rpo']
+ else:
+ replication_args_list['max_time_out_of_sync'] = -1
+
+ # Get remote system
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ remote_system_name = replication['remote_system_name']
+ remote_system_list = self.unity_conn.get_remote_system()
+ for remote_system in remote_system_list:
+ if remote_system.name == remote_system_name:
+ replication_args_list['remote_system'] = remote_system
+ break
+ if 'remote_system' not in replication_args_list.keys():
+ errormsg = "Remote system %s is not found" % (remote_system_name)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Form destination LUNs list
+ source_lun_list = cg_object.luns
+ replication_args_list['source_luns'] = self.get_destination_cg_luns(source_lun_list)
+
+ # Form destination cg name
+ if 'destination_cg_name' in replication and replication['destination_cg_name'] is not None:
+ replication_args_list['dst_cg_name'] = replication['destination_cg_name']
+ else:
+ replication_args_list['dst_cg_name'] = "DR_" + cg_object.name
+
+ LOG.info(("Enabling replication to the consistency group %s", cg_object.name))
+ cg_object.replicate_cg_with_dst_resource_provisioning(**replication_args_list)
+ return True
+ except Exception as e:
+ errormsg = "Enabling replication to the consistency group %s failed " \
+ "with error %s" % (cg_object.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def disable_cg_replication(self, cg_name):
+ ''' Remove replication from the consistency group '''
+ try:
+ # Get cg instance
+ cg_object = self.return_cg_instance(cg_name)
+
+ # Check if replication is enabled for cg
+ if not cg_object.check_cg_is_replicated():
+ return False
+
+ LOG.info(("Disabling replication from the consistency group %s", cg_object.name))
+ curr_cg_repl_session = self.unity_conn.get_replication_session(src_resource_id=cg_object.id)
+ for repl_session in curr_cg_repl_session:
+ repl_session.delete()
+ return True
+ except Exception as e:
+ errormsg = "Disabling replication to the consistency group %s failed " \
+ "with error %s" % (cg_object.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on consistency group module based on
+ parameters chosen in playbook
+ """
+ cg_name = self.module.params['cg_name']
+ cg_id = self.module.params['cg_id']
+ description = self.module.params['description']
+ volumes = self.module.params['volumes']
+ snap_schedule = self.module.params['snap_schedule']
+ new_cg_name = self.module.params['new_cg_name']
+ tiering_policy = self.module.params['tiering_policy']
+ vol_state = self.module.params['vol_state']
+ hosts = self.module.params['hosts']
+ mapping_state = self.module.params['mapping_state']
+ replication = self.module.params['replication_params']
+ replication_state = self.module.params['replication_state']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and consistency
+ # group details
+ result = dict(
+ changed=False,
+ create_cg='',
+ modify_cg='',
+ rename_cg='',
+ add_vols_to_cg='',
+ remove_vols_from_cg='',
+ delete_cg='',
+ add_hosts_to_cg='',
+ remove_hosts_from_cg='',
+ consistency_group_details={}
+ )
+ cg_details = self.get_details(cg_id=cg_id, cg_name=cg_name)
+
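+ # when only cg_id is supplied, pick up the name from the fetched details so later operations can address the CG by name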
+ if cg_name is None and cg_details:
+ cg_id = None
+ cg_name = cg_details['name']
+ if volumes:
+ volumes = self.refine_volumes(volumes)
+ self.validate_volumes(volumes)
+ if hosts:
+ hosts = self.refine_hosts(hosts)
+ self.validate_hosts(hosts)
+
+ modified = False
+
+ if cg_details:
+ modified = self.is_cg_modified(cg_details)
+
+ if vol_state and not volumes:
+ self.module.fail_json(msg="Please specify volumes along with vol_state")
+
+ if mapping_state and not hosts:
+ self.module.fail_json(msg="Please specify hosts along with mapping_state")
+
+ if replication and replication_state is None:
+ self.module.fail_json(msg="Please specify replication_state along with replication_params")
+
+ if state == 'present' and not cg_details:
+ if not volumes and tiering_policy:
+ self.module.fail_json(msg="The system cannot assign a"
+ " tiering policy to an empty"
+ " consistency group")
+ if not volumes and hosts:
+ self.module.fail_json(msg="The system cannot assign"
+ " hosts to an empty"
+ " consistency group")
+
+ if not cg_name:
+ msg = "The parameter cg_name length is 0. It is too short." \
+ " The min length is 1."
+ self.module.fail_json(msg=msg)
+
+ if new_cg_name:
+ self.module.fail_json(msg="Invalid argument, new_cg_name is"
+ " not required")
+
+ result['create_cg'], cg_details = self.create_cg(
+ cg_name, description, snap_schedule)
+ elif state == 'absent' and cg_details:
+ if cg_details['cg_replication_enabled']:
+ self.module.fail_json(msg="Consistency group cannot be deleted"
+ " because it is participating"
+ " in a replication session.")
+ if cg_details['luns']:
+ self.module.fail_json(msg="Please remove all volumes which"
+ " are part of consistency group"
+ " before deleting it.")
+ result['delete_cg'] = self.delete_cg(cg_name)
+
+ if state == 'present' and vol_state == 'present-in-group' and \
+ cg_details and volumes:
+ result['add_vols_to_cg'] = self.add_volumes_to_cg(cg_name,
+ volumes,
+ tiering_policy)
+ elif state == 'present' and vol_state == 'absent-in-group' and \
+ cg_details and volumes:
+ result['remove_vols_from_cg'] = self.remove_volumes_from_cg(
+ cg_name, volumes)
+
+ if hosts and mapping_state == 'mapped' and \
+ cg_details:
+ result['add_hosts_to_cg'] = self.map_hosts_to_cg(cg_name, hosts)
+
+ if hosts and mapping_state == 'unmapped' and \
+ cg_details:
+ result['remove_hosts_from_cg'] = self.unmap_hosts_to_cg(cg_name, hosts)
+
+ if state == 'present' and new_cg_name is not None:
+ if not new_cg_name:
+ msg = "The parameter new_cg_name length is 0. It is too" \
+ " short. The min length is 1."
+ self.module.fail_json(msg=msg)
+
+ if cg_name != new_cg_name:
+ result['rename_cg'] = self.rename_cg(cg_name, new_cg_name)
+ cg_name = new_cg_name
+
+ if state == 'present' and cg_details and modified:
+ result['modify_cg'] = self.modify_cg(cg_name, description,
+ snap_schedule, tiering_policy
+ )
+
+ if state == 'present' and cg_details and replication_state is not None:
+ if replication_state == 'enable':
+ result['changed'] = self.enable_cg_replication(cg_name, replication)
+ else:
+ result['changed'] = self.disable_cg_replication(cg_name)
+
+ if result['create_cg'] or result['modify_cg'] or result[
+ 'add_vols_to_cg'] or result['remove_vols_from_cg'] or result[
+ 'delete_cg'] or result['rename_cg'] or result[
+ 'add_hosts_to_cg'] or result['remove_hosts_from_cg']:
+ result['changed'] = True
+
+ result['consistency_group_details'] = self.get_details(cg_id=cg_id,
+ cg_name=cg_name
+ )
+
+ self.module.exit_json(**result)
+
+ def validate_destination_pool_info(self, replication):
+ if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None:
+ errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_replication_mode(self, replication):
+ if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous':
+ if replication['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if replication['rpo'] < 5 or replication['rpo'] > 1440:
+ errormsg = "rpo value should be in range of 5 to 1440"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_cg_replication_params(self, replication):
+ ''' Validate cg replication params '''
+ # Validate replication
+ if replication is None:
+ errormsg = "Please specify replication_params to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ else:
+ self.validate_destination_pool_info(replication)
+ self.validate_replication_mode(replication)
+ # Validate replication type
+ if replication['replication_type'] == 'remote' and replication['remote_system'] is None:
+ errormsg = "remote_system is required together with 'remote' replication_type"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ # Validate destination cg name
+ if 'destination_cg_name' in replication and replication['destination_cg_name'] is not None:
+ dst_cg_name_length = len(replication['destination_cg_name'])
+ if dst_cg_name_length == 0 or dst_cg_name_length > 95:
+ errormsg = "destination_cg_name value should be in range of 1 to 95"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+
+def get_consistencygroup_parameters():
+ """This method provide parameters required for the ansible consistency
+ group module on Unity"""
+ return dict(
+ cg_name=dict(required=False, type='str'),
+ cg_id=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list', elements='dict',
+ options=dict(
+ vol_name=dict(type='str'),
+ vol_id=dict(type='str')
+ )
+ ),
+ snap_schedule=dict(required=False, type='str'),
+ new_cg_name=dict(required=False, type='str'),
+ tiering_policy=dict(required=False, type='str', choices=[
+ 'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
+ vol_state=dict(required=False, type='str',
+ choices=['present-in-group', 'absent-in-group']),
+ hosts=dict(required=False, type='list', elements='dict',
+ options=dict(
+ host_name=dict(type='str'),
+ host_id=dict(type='str')
+ )),
+ mapping_state=dict(required=False, type='str',
+ choices=['mapped', 'unmapped']),
+ replication_params=dict(type='dict', options=dict(
+ destination_cg_name=dict(type='str'),
+ replication_mode=dict(type='str', choices=['asynchronous', 'manual'], required=True),
+ rpo=dict(type='int'),
+ replication_type=dict(type='str', choices=['local', 'remote'], default='local'),
+ remote_system=dict(type='dict',
+ options=dict(
+ remote_system_host=dict(type='str', required=True, no_log=True),
+ remote_system_verifycert=dict(type='bool', required=False,
+ default=True),
+ remote_system_username=dict(type='str', required=True),
+ remote_system_password=dict(type='str', required=True, no_log=True),
+ remote_system_port=dict(type='int', required=False, default=443, no_log=True)
+ )),
+ destination_pool_name=dict(type='str'),
+ destination_pool_id=dict(type='str')
+ )),
+ replication_state=dict(type='str', choices=['enable', 'disable']),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity consistency group object and perform action on it
+ based on user input from playbook"""
+ obj = ConsistencyGroup()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
new file mode 100644
index 000000000..b10f85386
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
@@ -0,0 +1,1906 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing FileSystem on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+
+module: filesystem
+version_added: '1.1.0'
+short_description: Manage filesystem on Unity storage system
+description:
+- Managing filesystem on Unity storage system includes
+ Create new filesystem,
+ Modify snapschedule attribute of filesystem,
+ Modify filesystem attributes,
+ Display filesystem details,
+ Display filesystem snapshots,
+ Display filesystem snapschedule,
+ Delete snapschedule associated with the filesystem,
+ Delete filesystem,
+ Create new filesystem with quota configuration,
+ Enable, modify and disable replication.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem. Mandatory only for the create operation.
+ All the operations are supported through I(filesystem_name).
+ - It is mutually exclusive with I(filesystem_id).
+ type: str
+ filesystem_id:
+ description:
+ - The id of the filesystem.
+ - It can be used only for get, modify, or delete operations.
+ - It is mutually exclusive with I(filesystem_name).
+ type: str
+ pool_name:
+ description:
+ - This is the name of the pool where the filesystem will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new
+ filesystem.
+ type: str
+ pool_id:
+ description:
+ - This is the ID of the pool where the filesystem will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new
+ filesystem.
+ type: str
+ size:
+ description:
+ - The size of the filesystem.
+ type: int
+ cap_unit:
+ description:
+ - The unit of the filesystem size. It defaults to C(GB), if not specified.
+ choices: ['GB' , 'TB']
+ type: str
+ nas_server_name:
+ description:
+ - Name of the NAS server on which filesystem will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which filesystem will be hosted.
+ type: str
+ supported_protocols:
+ description:
+ - Protocols supported by the file system.
+ - It will be overridden by NAS server configuration if NAS Server is C(Multiprotocol).
+ type: str
+ choices: ['NFS', 'CIFS', 'MULTIPROTOCOL']
+ description:
+ description:
+ - Description about the filesystem.
+ - Description can be removed by passing empty string ("").
+ type: str
+ smb_properties:
+ description:
+ - Advanced settings for SMB. It contains optional candidate variables.
+ type: dict
+ suboptions:
+ is_smb_sync_writes_enabled:
+ description:
+ - Indicates whether the synchronous writes option is enabled on the
+ file system.
+ type: bool
+ is_smb_notify_on_access_enabled:
+ description:
+ - Indicates whether notifications of changes to directory file
+ structure are enabled.
+ type: bool
+ is_smb_op_locks_enabled:
+ description:
+ - Indicates whether opportunistic file locking is enabled on the file
+ system.
+ type: bool
+ is_smb_notify_on_write_enabled:
+ description:
+ - Indicates whether file write notifications are enabled on the file
+ system.
+ type: bool
+ smb_notify_on_change_dir_depth:
+ description:
+ - Integer variable, determines the lowest directory level to which
+ the enabled notifications apply.
+ - Minimum value is C(1).
+ type: int
+ data_reduction:
+ description:
+ - Boolean variable, specifies whether or not to enable compression.
+ Compression is supported only for thin filesystems.
+ type: bool
+ is_thin:
+ description:
+ - Boolean variable, specifies whether or not it is a thin filesystem.
+ type: bool
+ access_policy:
+ description:
+ - Access policy of a filesystem.
+ choices: ['NATIVE', 'UNIX', 'WINDOWS']
+ type: str
+ locking_policy:
+ description:
+ - File system locking policies. These policy choices control whether the
+ NFSv4 range locks must be honored.
+ type: str
+ choices: ['ADVISORY', 'MANDATORY']
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ quota_config:
+ description:
+ - Configuration for quota management. It contains optional parameters.
+ type: dict
+ suboptions:
+ grace_period:
+ description:
+ - Grace period set in quota configuration after soft limit is reached.
+ - If I(grace_period) is not set during creation of filesystem,
+ it will be set to C(7 days) by default.
+ type: int
+ grace_period_unit:
+ description:
+ - Unit of grace period.
+ - Default unit is C(days).
+ type: str
+ choices: ['minutes', 'hours', 'days']
+ default_hard_limit:
+ description:
+ - Default hard limit for user quotas and tree quotas.
+ - If I(default_hard_limit) is not set during filesystem creation,
+ it will be set to C(0B) by default.
+ type: int
+ default_soft_limit:
+ description:
+ - Default soft limit for user quotas and tree quotas.
+ - If I(default_soft_limit) is not set during filesystem creation,
+ it will be set to C(0B) by default.
+ type: int
+ is_user_quota_enabled:
+ description:
+ - Indicates whether the user quota is enabled.
+ - If I(is_user_quota_enabled) is not set during filesystem creation,
+ it will be set to C(false) by default.
+ - Parameters I(is_user_quota_enabled) and I(quota_policy) are
+ mutually exclusive.
+ type: bool
+ quota_policy:
+ description:
+ - Quota policy set in quota configuration.
+ - If I(quota_policy) is not set during filesystem creation, it will
+ be set to C(FILE_SIZE) by default.
+ - Parameters I(is_user_quota_enabled) and I(quota_policy) are
+ mutually exclusive.
+ choices: ['FILE_SIZE','BLOCKS']
+ type: str
+ cap_unit:
+ description:
+ - Unit of I(default_soft_limit) and I(default_hard_limit) size.
+ - Default unit is C(GB).
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ state:
+ description:
+ - State variable to determine whether filesystem will exist or not.
+ choices: ['absent', 'present']
+ required: true
+ type: str
+ snap_schedule_name:
+ description:
+ - This is the name of an existing snapshot schedule which is to be associated with the filesystem.
+ - This is mutually exclusive with I(snapshot_schedule_id).
+ type: str
+ snap_schedule_id:
+ description:
+ - This is the id of an existing snapshot schedule which is to be associated with the filesystem.
+ - This is mutually exclusive with I(snapshot_schedule_name).
+ type: str
+ replication_params:
+ description:
+ - Settings required for enabling or modifying replication.
+ type: dict
+ suboptions:
+ replication_name:
+ description:
+ - Name of the replication session.
+ type: str
+ new_replication_name:
+ description:
+ - Replication name to rename the session to.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ - This is a mandatory field while creating a replication session.
+ type: str
+ choices: ['synchronous', 'asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination LUNs.
+ - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440) for C(asynchronous),
+ C(0) for C(synchronous) and C(-1) for C(manual).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+ - IP or FQDN for remote Unity unisphere Host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+ - User name of remote Unity unisphere Host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+ - Password of remote Unity unisphere Host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate of remote Unity unisphere Host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+ - Port at which remote Unity unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_id:
+ type: str
+ description:
+ - ID of pool to allocate destination filesystem.
+ destination_pool_name:
+ type: str
+ description:
+ - Name of pool to allocate destination filesystem.
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+
+notes:
+- SMB shares, NFS exports, and snapshots associated with filesystem need
+ to be deleted prior to deleting a filesystem.
+- The I(quota_config) parameter can be used to update default hard limit
+ and soft limit values to limit the maximum space that can be used.
+ By default they both are set to 0 during filesystem
+ creation which means unlimited.
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create FileSystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ state: "present"
+
+- name: Create FileSystem with quota configuration
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ pool_name: "pool_1"
+ size: 5
+ quota_config:
+ grace_period: 8
+ grace_period_unit: "days"
+ default_soft_limit: 10
+ is_user_quota_enabled: False
+ state: "present"
+
+- name: Expand FileSystem size
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ size: 10
+ state: "present"
+
+- name: Modify FileSystem smb_properties
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ smb_properties:
+ is_smb_op_locks_enabled: True
+ smb_notify_on_change_dir_depth: 5
+ is_smb_notify_on_access_enabled: True
+ state: "present"
+
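+# An illustrative task only; the values below are examples and the policy
+# choices are taken from the access_policy, locking_policy and
+# tiering_policy options documented above.
+- name: Modify FileSystem access, locking and tiering policy
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "ansible_test_fs"
+ nas_server_name: "lglap761"
+ access_policy: "UNIX"
+ locking_policy: "ADVISORY"
+ tiering_policy: "AUTOTIER"
+ state: "present"
+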
+- name: Modify FileSystem Snap Schedule
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_141"
+ snap_schedule_id: "{{snap_schedule_id}}"
+ state: "{{state_present}}"
+
+- name: Get details of FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "present"
+
+- name: Delete a FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ state: "absent"
+
+- name: Enable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ replication_type: "remote"
+ replication_mode: "asynchronous"
+ rpo: 60
+ remote_system:
+ remote_system_host: '0.1.2.3'
+ remote_system_verifycert: False
+ remote_system_username: 'username'
+ remote_system_password: 'password'
+ destination_pool_name: "pool_test_1"
+ replication_state: "enable"
+ state: "present"
+
+- name: Modify replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+- name: Disable replication on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_state: "disable"
+ state: "present"
+
+- name: Disable replication by specifying replication_name on the fs
+ dellemc.unity.filesystem:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "rs_405"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+"""
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+filesystem_details:
+ description: Details of the filesystem.
+ returned: When filesystem exists
+ type: dict
+ contains:
+ id:
+ description: The system generated ID given to the filesystem.
+ type: str
+ name:
+ description: Name of the filesystem.
+ type: str
+ description:
+ description: Description about the filesystem.
+ type: str
+ is_data_reduction_enabled:
+ description: Whether or not compression is enabled on this
+ filesystem.
+ type: bool
+ size_total_with_unit:
+ description: Size of the filesystem with actual unit.
+ type: str
+ tiering_policy:
+ description: Tiering policy applied to this filesystem.
+ type: str
+ is_cifs_notify_on_access_enabled:
+ description: Indicates whether the system generates a
+ notification when a user accesses the file system.
+ type: bool
+ is_cifs_notify_on_write_enabled:
+ description: Indicates whether the system generates a notification
+ when the file system is written to.
+ type: bool
+ is_cifs_op_locks_enabled:
+ description: Indicates whether opportunistic file locks are enabled
+ for the file system.
+ type: bool
+ is_cifs_sync_writes_enabled:
+ description: Indicates whether the CIFS synchronous writes option
+ is enabled for the file system.
+ type: bool
+ cifs_notify_on_change_dir_depth:
+ description: Indicates the lowest directory level to which the
+ enabled notifications apply, if any.
+ type: int
+ pool:
+ description: The pool in which this filesystem is allocated.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the pool.
+ type: str
+ name:
+ description: The name of the storage pool.
+ type: str
+ nas_server:
+ description: The NAS Server details on which this filesystem is hosted.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the NAS Server.
+ type: str
+ name:
+ description: The name of the NAS Server.
+ type: str
+ snapshots:
+ description: The list of snapshots of this filesystem.
+ type: list
+ contains:
+ id:
+ description: The system ID given to the filesystem
+ snapshot.
+ type: str
+ name:
+ description: The name of the filesystem snapshot.
+ type: str
+ is_thin_enabled:
+ description: Indicates whether thin provisioning is enabled for
+ this filesystem.
+ type: bool
+ snap_schedule_id:
+ description: Indicates the id of the snap schedule associated
+ with the filesystem.
+ type: str
+ snap_schedule_name:
+ description: Indicates the name of the snap schedule associated
+ with the filesystem.
+ type: str
+ quota_config:
+ description: Details of quota configuration of the filesystem
+ created.
+ type: dict
+ contains:
+ grace_period:
+ description: Grace period set in quota configuration
+ after soft limit is reached.
+ type: str
+ default_hard_limit:
+ description: Default hard limit for user quotas
+ and tree quotas.
+ type: int
+ default_soft_limit:
+ description: Default soft limit for user quotas
+ and tree quotas.
+ type: int
+ is_user_quota_enabled:
+ description: Indicates whether the user quota is enabled.
+ type: bool
+ quota_policy:
+ description: Quota policy set in quota configuration.
+ type: str
+ replication_sessions:
+ description: List of replication sessions if replication is enabled.
+ type: dict
+ contains:
+ id:
+ description: ID of replication session
+ type: str
+ name:
+ description: Name of replication session
+ type: str
+ remote_system:
+ description: Remote system
+ type: dict
+ contains:
+ id:
+ description: ID of remote system
+ type: str
+ sample: {
+ "access_policy": "AccessPolicyEnum.UNIX",
+ "cifs_notify_on_change_dir_depth": 512,
+ "cifs_share": null,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": true,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "format": "FSFormatEnum.UFS64",
+ "hash": 8735427610152,
+ "health": {
+ "UnityHealth": {
+ "hash": 8735427614928
+ }
+ },
+ "host_io_size": "HostIOSizeEnum.GENERAL_8K",
+ "id": "fs_65916",
+ "is_advanced_dedup_enabled": false,
+ "is_cifs_notify_on_access_enabled": false,
+ "is_cifs_notify_on_write_enabled": false,
+ "is_cifs_op_locks_enabled": false,
+ "is_cifs_sync_writes_enabled": false,
+ "is_data_reduction_enabled": false,
+ "is_read_only": false,
+ "is_smbca": false,
+ "is_thin_enabled": true,
+ "locking_policy": "FSLockingPolicyEnum.MANDATORY",
+ "metadata_size": 11274289152,
+ "metadata_size_allocated": 4294967296,
+ "min_size_allocated": 0,
+ "name": "test_fs",
+ "nas_server": {
+ "id": "nas_18",
+ "name": "test_nas1"
+ },
+ "nfs_share": null,
+ "per_tier_size_used": [
+ 6979321856,
+ 0,
+ 0
+ ],
+ "pool": {
+ "id": "pool_7",
+ "name": "pool 7"
+ },
+ "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES",
+ "quota_config": {
+ "default_hard_limit": "0B",
+ "default_soft_limit": "0B",
+ "grace_period": "7.0 days",
+ "id": "quotaconfig_171798760421_0",
+ "is_user_quota_enabled": false,
+ "quota_policy": "QuotaPolicyEnum.FILE_SIZE"
+ },
+ "replication_sessions": {
+ "current_transfer_est_remain_time": 0,
+ "id": "***",
+ "last_sync_time": "2022-05-12 11:20:38+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.SOURCE",
+ "max_time_out_of_sync": 60,
+ "members": null,
+ "name": "local_repl_new",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8735426929707
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.FILESYSTEM",
+ "src_resource_id": "res_66444",
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": "ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED",
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IDLE"
+ },
+ "size_allocated": 283148288,
+ "size_allocated_total": 4578148352,
+ "size_preallocated": 2401173504,
+ "size_total": 10737418240,
+ "size_total_with_unit": "10.0 GB",
+ "size_used": 1620312064,
+ "snap_count": 2,
+ "snaps_size": 21474869248,
+ "snaps_size_allocated": 32768,
+ "snapshots": [],
+ "supported_protocols": "FSSupportedProtocolEnum.NFS",
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "FilesystemTypeEnum.FILESYSTEM"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('filesystem')
+
+application_type = "Ansible/1.6.0"
+
+
+class Filesystem(object):
+ """Class with FileSystem operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_filesystem_parameters())
+
+ mutually_exclusive = [['filesystem_name', 'filesystem_id'],
+ ['pool_name', 'pool_id'],
+ ['nas_server_name', 'nas_server_id'],
+ ['snap_schedule_name', 'snap_schedule_id']]
+
+ required_one_of = [['filesystem_name', 'filesystem_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def get_filesystem(self, name=None, id=None, obj_nas_server=None):
+ """Get the details of a FileSystem.
+ :param name: The name of the filesystem
+ :param id: The id of the filesystem
+ :param obj_nas_server: NAS Server object instance
+ :return: instance of the respective filesystem if it exists.
+ """
+
+ id_or_name = id if id else name
+ errormsg = "Failed to get the filesystem {0} with error {1}"
+
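+ # filesystem names are unique only within a NAS server, so name-based lookups require the NAS server object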
+ try:
+ obj_fs = None
+ if id:
+ if obj_nas_server:
+ obj_fs = self.unity_conn.get_filesystem(
+ _id=id,
+ nas_server=obj_nas_server)
+ else:
+ obj_fs = self.unity_conn.get_filesystem(_id=id)
+
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem "
+ "object %s ", obj_fs)
+ return obj_fs
+ elif name:
+ if not obj_nas_server:
+ err_msg = "NAS Server is required to get the FileSystem"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ obj_fs = self.unity_conn.get_filesystem(
+ name=name,
+ nas_server=obj_nas_server)
+ if obj_fs:
+ LOG.info(
+ "Successfully got the filesystem object %s ", obj_fs)
+ return obj_fs
+ else:
+ LOG.info("Failed to get the filesystem %s", id_or_name)
+ return None
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ msg = errormsg.format(id_or_name, cred_err)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = errormsg.format(id_or_name, str(e))
+ self.module.fail_json(msg=msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ return None
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nas_server(self, name=None, id=None):
+ """Get the instance of a NAS Server.
+ :param name: The NAS Server name
+ :param id: The NAS Server id
+ :return: instance of the respective NAS Server if it exists.
+ """
+
+ errormsg = "Failed to get the NAS Server {0} with error {1}"
+ id_or_name = name if name else id
+
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+ if id and obj_nas.existed:
+ LOG.info("Successfully got the nas server object %s",
+ obj_nas)
+ return obj_nas
+ elif name:
+ LOG.info("Successfully got the nas server object %s ",
+ obj_nas)
+ return obj_nas
+ else:
+ msg = "Failed to get the nas server with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_pool(self, pool_name=None, pool_id=None):
+ """Get the instance of a pool.
+ :param pool_name: The name of the pool
+ :param pool_id: The id of the pool
+ :return: instance of the respective pool if it exists
+ """
+
+ id_or_name = pool_id if pool_id else pool_name
+ errormsg = "Failed to get the pool {0} with error {1}"
+
+ try:
+ obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)
+
+ if pool_id and obj_pool.existed:
+ LOG.info("Successfully got the pool object %s",
+ obj_pool)
+ return obj_pool
+ if pool_name:
+ LOG.info("Successfully got pool %s", obj_pool)
+ return obj_pool
+ else:
+ msg = "Failed to get the pool with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_tiering_policy_enum(self, tiering_policy):
+ """Get the tiering_policy enum.
+ :param tiering_policy: The tiering_policy string
+ :return: tiering_policy enum
+ """
+
+ if tiering_policy in utils.TieringPolicyEnum.__members__:
+ return utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_supported_protocol_enum(self, supported_protocol):
+ """Get the supported_protocol enum.
+ :param supported_protocol: The supported_protocol string
+ :return: supported_protocol enum
+ """
+
+ supported_protocol = "MULTI_PROTOCOL" if \
+ supported_protocol == "MULTIPROTOCOL" else supported_protocol
+ if supported_protocol in utils.FSSupportedProtocolEnum.__members__:
+ return utils.FSSupportedProtocolEnum[supported_protocol]
+ else:
+ errormsg = "Invalid choice {0} for supported_protocol".format(
+ supported_protocol)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_locking_policy_enum(self, locking_policy):
+ """Get the locking_policy enum.
+ :param locking_policy: The locking_policy string
+ :return: locking_policy enum
+ """
+ if locking_policy in utils.FSLockingPolicyEnum.__members__:
+ return utils.FSLockingPolicyEnum[locking_policy]
+ else:
+ errormsg = "Invalid choice {0} for locking_policy".format(
+ locking_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_access_policy_enum(self, access_policy):
+ """Get the access_policy enum.
+ :param access_policy: The access_policy string
+ :return: access_policy enum
+ """
+ if access_policy in utils.AccessPolicyEnum.__members__:
+ return utils.AccessPolicyEnum[access_policy]
+ else:
+ errormsg = "Invalid choice {0} for access_policy".format(
+ access_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_filesystem(self, name, obj_pool, obj_nas_server, size):
+ """Create a FileSystem.
+ :param name: Name of the FileSystem
+ :param obj_pool: Storage Pool obj instance
+ :param obj_nas_server: NAS Server obj instance
+ :param size: Total size of a filesystem in bytes
+ :return: FileSystem object on successful creation
+ """
+ try:
+
+ supported_protocol = self.module.params['supported_protocols']
+ supported_protocol = self.get_supported_protocol_enum(
+ supported_protocol) if supported_protocol else None
+ is_thin = self.module.params['is_thin']
+
+ tiering_policy = self.module.params['tiering_policy']
+ tiering_policy = self.get_tiering_policy_enum(tiering_policy) \
+ if tiering_policy else None
+
+ obj_fs = utils.UnityFileSystem.create(
+ self.unity_conn._cli,
+ pool=obj_pool,
+ nas_server=obj_nas_server,
+ name=name,
+ size=size,
+ proto=supported_protocol,
+ is_thin=is_thin,
+ tiering_policy=tiering_policy)
+
+ LOG.info("Successfully created file system , %s", obj_fs)
+ return obj_fs
+
+ except Exception as e:
+ errormsg = "Create filesystem {0} operation failed" \
+ " with error {1}".format(name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_filesystem(self, id):
+ """Delete a FileSystem.
+ :param id: The object instance of the filesystem to be deleted
+ """
+
+ try:
+ obj_fs = self.get_filesystem(id=id)
+ obj_fs_dict = obj_fs._get_properties()
+ if obj_fs_dict['cifs_share'] is not None:
+ errormsg = "The Filesystem has SMB Shares. Hence deleting " \
+ "this filesystem is not safe."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if obj_fs_dict['nfs_share'] is not None:
+ errormsg = "The FileSystem has NFS Exports. Hence deleting " \
+ "this filesystem is not safe."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ obj_fs.delete()
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of FileSystem id:{0} " \
+ "failed with error {1}".format(id,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def is_modify_required(self, obj_fs, cap_unit):
+ """Checks if any modify required for filesystem attributes
+ :param obj_fs: filesystem instance
+ :param cap_unit: capacity unit
+ :return: dict of attributes to be updated, or None if no update is needed
+ """
+ try:
+ to_update = {}
+ obj_fs = obj_fs.update()
+ description = self.module.params['description']
+
+ if description is not None and description != obj_fs.description:
+ to_update.update({'description': description})
+
+ size = self.module.params['size']
+ if size and cap_unit:
+ size_byte = int(utils.get_size_bytes(size, cap_unit))
+ if size_byte < obj_fs.size_total:
+ self.module.fail_json(msg="Filesystem size can be "
+ "expanded only")
+ elif size_byte > obj_fs.size_total:
+ to_update.update({'size': size_byte})
+
+ tiering_policy = self.module.params['tiering_policy']
+ if tiering_policy and self.get_tiering_policy_enum(
+ tiering_policy) != obj_fs.tiering_policy:
+ to_update.update({'tiering_policy':
+ self.get_tiering_policy_enum(
+ tiering_policy)})
+
+ is_thin = self.module.params['is_thin']
+ if is_thin is not None and is_thin != obj_fs.is_thin_enabled:
+ to_update.update({'is_thin': is_thin})
+
+ data_reduction = self.module.params['data_reduction']
+ if data_reduction is not None and \
+ data_reduction != obj_fs.is_data_reduction_enabled:
+ to_update.update({'is_compression': data_reduction})
+
+ access_policy = self.module.params['access_policy']
+ if access_policy and self.get_access_policy_enum(
+ access_policy) != obj_fs.access_policy:
+ to_update.update({'access_policy':
+ self.get_access_policy_enum(access_policy)})
+
+ locking_policy = self.module.params['locking_policy']
+ if locking_policy and self.get_locking_policy_enum(
+ locking_policy) != obj_fs.locking_policy:
+ to_update.update({'locking_policy':
+ self.get_locking_policy_enum(
+ locking_policy)})
+
+ snap_sch = obj_fs.storage_resource.snap_schedule
+
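+ # an empty snap_sch_id means the schedule should be detached from the filesystem; otherwise attach the requested schedule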
+ if self.snap_sch_id is not None:
+ if self.snap_sch_id == "":
+ if snap_sch and snap_sch.id != self.snap_sch_id:
+ to_update.update({'is_snap_schedule_paused': False})
+ elif snap_sch is None or snap_sch.id != self.snap_sch_id:
+ to_update.update({'snap_sch_id': self.snap_sch_id})
+
+ smb_properties = self.module.params['smb_properties']
+ if smb_properties:
+ sync_writes_enabled = \
+ smb_properties['is_smb_sync_writes_enabled']
+ oplocks_enabled = \
+ smb_properties['is_smb_op_locks_enabled']
+ notify_on_write = \
+ smb_properties['is_smb_notify_on_write_enabled']
+ notify_on_access = \
+ smb_properties['is_smb_notify_on_access_enabled']
+ notify_on_change_dir_depth = \
+ smb_properties['smb_notify_on_change_dir_depth']
+
+ if sync_writes_enabled is not None and \
+ sync_writes_enabled != obj_fs.is_cifs_sync_writes_enabled:
+ to_update.update(
+ {'is_cifs_sync_writes_enabled': sync_writes_enabled})
+
+ if oplocks_enabled is not None and \
+ oplocks_enabled != obj_fs.is_cifs_op_locks_enabled:
+ to_update.update(
+ {'is_cifs_op_locks_enabled': oplocks_enabled})
+
+ if notify_on_write is not None and \
+ notify_on_write != \
+ obj_fs.is_cifs_notify_on_write_enabled:
+ to_update.update(
+ {'is_cifs_notify_on_write_enabled': notify_on_write})
+
+ if notify_on_access is not None and \
+ notify_on_access != \
+ obj_fs.is_cifs_notify_on_access_enabled:
+ to_update.update(
+ {'is_cifs_notify_on_access_enabled':
+ notify_on_access})
+
+ if notify_on_change_dir_depth is not None and \
+ notify_on_change_dir_depth != \
+ obj_fs.cifs_notify_on_change_dir_depth:
+ to_update.update(
+ {'cifs_notify_on_change_dir_depth':
+ notify_on_change_dir_depth})
+ if len(to_update) > 0:
+ return to_update
+ else:
+ return None
+
+ except Exception as e:
+ errormsg = "Failed to determine if FileSystem id: {0}" \
+ " modification required with error {1}".format(obj_fs.id,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_filesystem(self, update_dict, obj_fs):
+ """ modifes attributes for a filesystem instance
+ :param update_dict: modify dict
+ :return: True on Success
+ """
+ try:
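+ # SMB settings cannot be passed straight to modify(); they are wrapped into a cifs_fs_parameters object first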
+ adv_smb_params = [
+ 'is_cifs_sync_writes_enabled',
+ 'is_cifs_op_locks_enabled',
+ 'is_cifs_notify_on_write_enabled',
+ 'is_cifs_notify_on_access_enabled',
+ 'cifs_notify_on_change_dir_depth']
+
+ cifs_fs_payload = {}
+ fs_update_payload = {}
+
+ for smb_param in adv_smb_params:
+ if smb_param in update_dict.keys():
+ cifs_fs_payload.update({smb_param: update_dict[smb_param]})
+
+ LOG.debug("CIFS Modify Payload: %s", cifs_fs_payload)
+
+ cifs_fs_parameters = obj_fs.prepare_cifs_fs_parameters(
+ **cifs_fs_payload)
+
+ fs_update_params = [
+ 'size',
+ 'is_thin',
+ 'tiering_policy',
+ 'is_compression',
+ 'access_policy',
+ 'locking_policy',
+ 'description',
+ 'cifs_fs_parameters']
+
+ for fs_param in fs_update_params:
+ if fs_param in update_dict.keys():
+ fs_update_payload.update({fs_param: update_dict[fs_param]})
+
+ if cifs_fs_parameters:
+ fs_update_payload.update(
+ {'cifs_fs_parameters': cifs_fs_parameters})
+
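+            # Snap schedule settings are nested under
+            # snap_schedule_parameters with camelCase keys (presumably
+            # mirroring the REST payload shape expected by the array).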
+ if "snap_sch_id" in update_dict.keys():
+ fs_update_payload.update(
+ {'snap_schedule_parameters': {'snapSchedule':
+ {'id': update_dict.get('snap_sch_id')}
+ }}
+ )
+ elif "is_snap_schedule_paused" in update_dict.keys():
+ fs_update_payload.update(
+ {'snap_schedule_parameters': {'isSnapSchedulePaused': False}
+ })
+
+ obj_fs = obj_fs.update()
+ resp = obj_fs.modify(**fs_update_payload)
+ LOG.info("Successfully modified the FS with response %s", resp)
+
+ except Exception as e:
+ errormsg = "Failed to modify FileSystem instance id: {0}" \
+ " with error {1}".format(obj_fs.id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_filesystem_display_attributes(self, obj_fs):
+ """get display filesystem attributes
+ :param obj_fs: filesystem instance
+ :return: filesystem dict to display
+ """
+ try:
+ obj_fs = obj_fs.update()
+ filesystem_details = obj_fs._get_properties()
+ filesystem_details['size_total_with_unit'] = utils. \
+ convert_size_with_unit(int(filesystem_details['size_total']))
+ if obj_fs.pool:
+ filesystem_details.update(
+ {'pool': {'name': obj_fs.pool.name,
+ 'id': obj_fs.pool.id}})
+ if obj_fs.nas_server:
+ filesystem_details.update(
+ {'nas_server': {'name': obj_fs.nas_server.name,
+ 'id': obj_fs.nas_server.id}})
+ snap_list = []
+ if obj_fs.has_snap():
+ for snap in obj_fs.snapshots:
+ d = {'name': snap.name, 'id': snap.id}
+ snap_list.append(d)
+ filesystem_details['snapshots'] = snap_list
+
+ if obj_fs.storage_resource.snap_schedule:
+ filesystem_details['snap_schedule_id'] = obj_fs.storage_resource.snap_schedule.id
+ filesystem_details['snap_schedule_name'] = obj_fs.storage_resource.snap_schedule.name
+
+ quota_config_obj = self.get_quota_config_details(obj_fs)
+
+ if quota_config_obj:
+
+ hard_limit = utils.convert_size_with_unit(
+ quota_config_obj.default_hard_limit)
+ soft_limit = utils.convert_size_with_unit(
+ quota_config_obj.default_soft_limit)
+ grace_period = get_time_with_unit(
+ quota_config_obj.grace_period)
+
+ filesystem_details.update({'quota_config':
+ {'id': quota_config_obj.id,
+ 'default_hard_limit': hard_limit,
+ 'default_soft_limit': soft_limit,
+ 'is_user_quota_enabled':
+ quota_config_obj.is_user_quota_enabled,
+ 'quota_policy': quota_config_obj._get_properties()[
+ 'quota_policy'],
+ 'grace_period': grace_period}
+ })
+ filesystem_details['replication_sessions'] = []
+ fs_repl_sessions = self.get_replication_session(obj_fs)
+ if fs_repl_sessions:
+ filesystem_details['replication_sessions'] = \
+ fs_repl_sessions._get_properties()
+ return filesystem_details
+
+ except Exception as e:
+ errormsg = "Failed to display the filesystem {0} with " \
+ "error {1}".format(obj_fs.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_input_string(self):
+ """ validates the input string checks if it is empty string """
+ invalid_string = ""
+ try:
+ for key in self.module.params:
+ val = self.module.params[key]
+ if key == "description" or key == "snap_schedule_name" \
+ or key == "snap_schedule_id":
+ continue
+ if isinstance(val, str) \
+ and val == invalid_string:
+ errmsg = 'Invalid input parameter "" for {0}'.format(
+ key)
+ self.module.fail_json(msg=errmsg)
+ if self.module.params['replication_params'] and self.module.params['replication_state'] is None:
+ self.module.fail_json(msg="Please specify replication_state along with replication_params")
+ except Exception as e:
+ errormsg = "Failed to validate the module param with " \
+ "error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def resolve_to_snapschedule_id(self, params):
+        """ Get the snapshot schedule id for a given snap schedule name or id
+ :param params: snap schedule name or id
+ :return: snap schedule id after validation
+ """
+
+ try:
+ snap_sch_id = None
+ snapshot_schedule = {}
+ if params["name"]:
+ snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, name=params["name"])
+ elif params["id"]:
+ snapshot_schedule = utils.UnitySnapScheduleList.get(self.unity_conn._cli, id=params["id"])
+
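+            # UnitySnapScheduleList.get returns a list-like object, so the
+            # matching schedule id is read from the first element.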
+ if snapshot_schedule:
+ snap_sch_id = snapshot_schedule.id[0]
+
+ if not snap_sch_id:
+                errormsg = "Failed to find the snapshot schedule id for " \
+                           "the given name or id: {0}".format(
+                               params["name"] or params["id"])
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ return snap_sch_id
+
+ except Exception as e:
+            errormsg = "Failed to find the snapshot schedules with " \
+                       "error {0}".format(str(e))
+            LOG.error(errormsg)
+            self.module.fail_json(msg=errormsg)
+
+ def get_quota_config_details(self, obj_fs):
+ """
+ Get the quota config ID mapped to the filesystem
+ :param obj_fs: Filesystem instance
+ :return: Quota config object if exists else None
+ """
+ try:
+ all_quota_config = self.unity_conn.get_quota_config(filesystem=obj_fs)
+ fs_id = obj_fs.id
+
+ if len(all_quota_config) == 0:
+ LOG.error("The quota_config object for new filesystem "
+ "is not updated yet.")
+ return None
+
+            for quota_config in all_quota_config:
+                if fs_id and quota_config.filesystem.id == fs_id and \
+                        not quota_config.tree_quota:
+                    msg = "Quota config id for filesystem %s is %s" \
+                          % (fs_id, quota_config.id)
+                    LOG.info(msg)
+                    return quota_config
+
+ except Exception as e:
+ errormsg = "Failed to fetch quota config for filesystem {0} " \
+ " with error {1}".format(fs_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_quota_config(self, quota_config_obj, quota_config_params):
+ """
+ Modify default quota config settings of newly created filesystem.
+ The default setting of quota config after filesystem creation is:
+ default_soft_limit and default_hard_limit are 0,
+ is_user_quota_enabled is false,
+ grace_period is 7 days and,
+        grace_period is 7 days, and
+ :param quota_config_obj: Quota config instance
+ :param quota_config_params: Quota config parameters to be modified
+ :return: Boolean whether quota config is modified
+ """
+
+ if quota_config_params:
+ soft_limit = quota_config_params['default_soft_limit']
+ hard_limit = quota_config_params['default_hard_limit']
+ is_user_quota_enabled = quota_config_params['is_user_quota_enabled']
+ quota_policy = quota_config_params['quota_policy']
+ grace_period = quota_config_params['grace_period']
+ cap_unit = quota_config_params['cap_unit']
+ gp_unit = quota_config_params['grace_period_unit']
+
+ if soft_limit:
+ soft_limit_in_bytes = utils.get_size_bytes(soft_limit, cap_unit)
+ else:
+ soft_limit_in_bytes = quota_config_obj.default_soft_limit
+
+ if hard_limit:
+ hard_limit_in_bytes = utils.get_size_bytes(hard_limit, cap_unit)
+ else:
+ hard_limit_in_bytes = quota_config_obj.default_hard_limit
+
+ if grace_period:
+ grace_period_in_sec = get_time_in_seconds(grace_period, gp_unit)
+ else:
+ grace_period_in_sec = quota_config_obj.grace_period
+
+ policy_enum = None
+ policy_enum_val = None
+ if quota_policy:
+ if utils.QuotaPolicyEnum[quota_policy]:
+ policy_enum = utils.QuotaPolicyEnum[quota_policy]
+ policy_enum_val = \
+ utils.QuotaPolicyEnum[quota_policy]._get_properties()['value']
+ else:
+ errormsg = "Invalid choice {0} for quota policy".format(
+ quota_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Verify if modify is required. If not required, return False
+ if quota_config_obj.default_hard_limit == hard_limit_in_bytes and \
+ quota_config_obj.default_soft_limit == soft_limit_in_bytes and \
+ quota_config_obj.grace_period == grace_period_in_sec and \
+ ((quota_policy is not None and
+ quota_config_obj.quota_policy == policy_enum) or
+ quota_policy is None) and \
+ (is_user_quota_enabled is None or
+ (is_user_quota_enabled is not None and
+ is_user_quota_enabled == quota_config_obj.is_user_quota_enabled)):
+ return False
+
+ try:
+ resp = self.unity_conn.modify_quota_config(
+ quota_config_id=quota_config_obj.id,
+ grace_period=grace_period_in_sec,
+ default_hard_limit=hard_limit_in_bytes,
+ default_soft_limit=soft_limit_in_bytes,
+ is_user_quota_enabled=is_user_quota_enabled,
+ quota_policy=policy_enum_val)
+ LOG.info("Successfully modified the quota config with response %s", resp)
+ return True
+
+ except Exception as e:
+ errormsg = "Failed to modify quota config for filesystem {0} " \
+ " with error {1}".format(quota_config_obj.filesystem.id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def update_replication_params(self, replication_params):
+ ''' Update replication params '''
+ try:
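+            # For remote replication, open a second Unisphere connection to
+            # the destination system so the destination pool name can be
+            # resolved to a pool id on that system.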
+ if replication_params['replication_type'] == 'remote' or \
+ (replication_params['replication_type'] is None and
+ replication_params['remote_system']):
+ connection_params = {
+ 'unispherehost': replication_params['remote_system']['remote_system_host'],
+ 'username': replication_params['remote_system']['remote_system_username'],
+ 'password': replication_params['remote_system']['remote_system_password'],
+ 'validate_certs': replication_params['remote_system']['remote_system_verifycert'],
+ 'port': replication_params['remote_system']['remote_system_port']
+ }
+ remote_system_conn = utils.get_unity_unisphere_connection(
+ connection_params, application_type)
+ replication_params['remote_system_name'] = remote_system_conn.name
+ if replication_params['destination_pool_name'] is not None:
+ pool_object = \
+ remote_system_conn.get_pool(name=replication_params['destination_pool_name'])
+ replication_params['destination_pool_id'] = pool_object.id
+ else:
+ if replication_params['destination_pool_name'] is not None:
+ pool_object = \
+ self.unity_conn.get_pool(name=replication_params['destination_pool_name'])
+ replication_params['destination_pool_id'] = pool_object.id
+ except Exception as e:
+ errormsg = "Updating replication params failed" \
+ " with error %s" % str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_rpo(self, replication_params):
+ ''' Validates rpo based on replication mode '''
+ if replication_params['replication_mode'] == 'asynchronous' and \
+ replication_params['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ rpo, replication_mode = replication_params['rpo'], replication_params[
+ 'replication_mode']
+
+ if rpo and replication_mode:
+
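+            # Allowed rpo values per mode: asynchronous expects 5-1440
+            # (minutes), synchronous expects 0 and manual expects -1.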
+ rpo_criteria = {
+ "asynchronous": lambda n: 5 <= n <= 1440,
+ "synchronous": lambda n: n == 0,
+ "manual": lambda n: n == -1
+ }
+
+ if rpo and not rpo_criteria[replication_mode](rpo):
+ errormsg = f"Invalid rpo value - {rpo} for " \
+ f"{replication_mode} replication mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_replication_params(self, replication_params):
+ ''' Validate replication params '''
+ if not replication_params:
+ errormsg = "Please specify replication_params to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if replication_params['destination_pool_id'] is not None and \
+ replication_params['destination_pool_name'] is not None:
+        errormsg = "'destination_pool_id' and 'destination_pool_name' are mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ self.validate_rpo(replication_params)
+ # Validate replication type
+ if replication_params['replication_type'] == 'remote' and replication_params['remote_system'] is None:
+        errormsg = "remote_system is required together with 'remote' replication_type."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_create_replication_params(self, replication_params):
+ ''' Validate replication params '''
+
+ if replication_params['destination_pool_id'] is None and \
+ replication_params['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ keys = ['replication_mode', 'replication_type']
+ for key in keys:
+ if replication_params[key] is None:
+ errormsg = "Please specify %s to enable replication." % key
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_replication_session(self, obj_fs, repl_session, replication_params):
+ """ Modify the replication session
+ :param: obj_fs: Filesystem object
+ :param: repl_session: Replication session to be modified
+ :param: replication_params: Module input params
+ :return: True if modification is successful
+ """
+ try:
+ LOG.info("Modifying replication session of filesystem %s", obj_fs.name)
+ modify_payload = {}
+            rpo = None
+            if replication_params['replication_mode'] == 'manual':
+                rpo = -1
+            elif replication_params['replication_mode'] == 'synchronous':
+                rpo = 0
+            elif replication_params['rpo']:
+                rpo = replication_params['rpo']
+ name = repl_session.name
+ if replication_params['new_replication_name'] and \
+ name != replication_params['new_replication_name']:
+ name = replication_params['new_replication_name']
+
+ if repl_session.name != name:
+ modify_payload['name'] = name
+ if ((replication_params['replication_mode'] or replication_params['rpo']) and
+ repl_session.max_time_out_of_sync != rpo):
+ modify_payload['max_time_out_of_sync'] = rpo
+
+ if modify_payload:
+ repl_session.modify(**modify_payload)
+ return True
+
+ return False
+ except Exception as e:
+ errormsg = "Modifying replication session failed with error %s" % e
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def enable_replication(self, obj_fs, replication_params):
+ """ Enable the replication session
+ :param: obj_fs: Filesystem object
+ :param: replication_params: Module input params
+ :return: True if enabling replication is successful
+ """
+ try:
+ self.validate_replication_params(replication_params)
+ self.update_replication_params(replication_params)
+
+ repl_session = \
+ self.get_replication_session_on_filter(obj_fs, replication_params, "modify")
+ if repl_session:
+ return self.modify_replication_session(obj_fs, repl_session, replication_params)
+
+ self.validate_create_replication_params(replication_params)
+ replication_args_list = get_replication_args_list(replication_params)
+ if 'remote_system_name' in replication_params:
+ remote_system_name = replication_params['remote_system_name']
+ remote_system_list = self.unity_conn.get_remote_system()
+ for remote_system in remote_system_list:
+ if remote_system.name == remote_system_name:
+ replication_args_list['remote_system'] = remote_system
+ break
+ if 'remote_system' not in replication_args_list.keys():
+ errormsg = "Remote system %s is not found" % (remote_system_name)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ LOG.info("Enabling replication to the filesystem %s", obj_fs.name)
+ obj_fs.replicate_with_dst_resource_provisioning(**replication_args_list)
+ return True
+ except Exception as e:
+ errormsg = "Enabling replication to the filesystem %s failed " \
+ "with error %s" % (obj_fs.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def disable_replication(self, obj_fs, replication_params):
+ """ Remove replication from the filesystem
+        :param: obj_fs: Filesystem object
+        :param: replication_params: Module input params
+ :return: True if disabling replication is successful
+ """
+ try:
+            LOG.info("Disabling replication on the filesystem %s", obj_fs.name)
+ if replication_params:
+ self.update_replication_params(replication_params)
+ repl_session = \
+ self.get_replication_session_on_filter(obj_fs, replication_params, "delete")
+ if repl_session:
+ repl_session.delete()
+ return True
+ return False
+ except Exception as e:
+ errormsg = "Disabling replication on the filesystem %s failed " \
+ "with error %s" % (obj_fs.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_replication_session_on_filter(self, obj_fs, replication_params, action):
+ if replication_params and replication_params['remote_system']:
+ repl_session = \
+ self.get_replication_session(obj_fs, filter_key="remote_system_name",
+ replication_params=replication_params)
+ elif replication_params and replication_params['replication_name']:
+ repl_session = \
+ self.get_replication_session(obj_fs, filter_key="name",
+ name=replication_params['replication_name'])
+ else:
+ repl_session = self.get_replication_session(obj_fs, action=action)
+ if repl_session and action and replication_params and \
+ replication_params['replication_type'] == 'local' and \
+ repl_session.remote_system.name != self.unity_conn.name:
+ return None
+
+ return repl_session
+
+ def get_replication_session(self, obj_fs, filter_key=None, replication_params=None, name=None, action=None):
+ """ Retrieves the replication sessions configured for the filesystem
+ :param: obj_fs: Filesystem object
+ :param: filter_key: Key to filter replication sessions
+ :param: replication_params: Module input params
+ :param: name: Replication session name
+ :param: action: Specifies modify or delete action on replication session
+ :return: Replication session details
+ """
+ try:
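+            # Sessions are looked up by the storage resource id backing the
+            # filesystem; without a filter, a single session is returned
+            # directly and multiple sessions require replication_name to
+            # disambiguate a modify/delete.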
+ repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_fs.storage_resource.id)
+ if not filter_key and repl_session:
+ if len(repl_session) > 1:
+ if action:
+ error_msg = 'There are multiple replication sessions for the filesystem.'\
+ ' Please specify replication_name in replication_params to %s.' % action
+ self.module.fail_json(msg=error_msg)
+ return repl_session
+ return repl_session[0]
+ for session in repl_session:
+ if filter_key == 'remote_system_name' and \
+ session.remote_system.name == replication_params['remote_system_name']:
+ return session
+ if filter_key == 'name' and session.name == name:
+ return session
+ return None
+ except Exception as e:
+            errormsg = "Retrieving replication session on the filesystem " \
+                       "failed with error %s" % str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on filesystem module based on parameters
+ passed in the playbook
+ """
+ filesystem_name = self.module.params['filesystem_name']
+ filesystem_id = self.module.params['filesystem_id']
+ nas_server_name = self.module.params['nas_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ pool_name = self.module.params['pool_name']
+ pool_id = self.module.params['pool_id']
+ size = self.module.params['size']
+ cap_unit = self.module.params['cap_unit']
+ quota_config = self.module.params['quota_config']
+ replication_params = self.module.params['replication_params']
+ replication_state = self.module.params['replication_state']
+ state = self.module.params['state']
+ snap_schedule_name = self.module.params['snap_schedule_name']
+ snap_schedule_id = self.module.params['snap_schedule_id']
+
+ # result is a dictionary to contain end state and FileSystem details
+ changed = False
+ result = dict(
+ changed=False,
+ filesystem_details={}
+ )
+
+ to_modify_dict = None
+ filesystem_details = None
+ quota_config_obj = None
+
+ self.validate_input_string()
+
+ if size is not None and size == 0:
+ self.module.fail_json(msg="Size can not be 0 (Zero)")
+
+ if size and not cap_unit:
+ cap_unit = 'GB'
+
+ if quota_config:
+ if (quota_config['default_hard_limit'] is not None
+ or quota_config['default_soft_limit'] is not None) and \
+ not quota_config['cap_unit']:
+ quota_config['cap_unit'] = 'GB'
+
+ if quota_config['grace_period'] is not None \
+ and quota_config['grace_period_unit'] is None:
+ quota_config['grace_period_unit'] = 'days'
+
+ if quota_config['grace_period'] is not None \
+ and quota_config['grace_period'] <= 0:
+ self.module.fail_json(msg="Invalid grace_period provided. "
+ "Must be greater than 0.")
+
+ if quota_config['default_soft_limit'] is not None \
+ and utils.is_size_negative(quota_config['default_soft_limit']):
+ self.module.fail_json(msg="Invalid default_soft_limit provided. "
+ "Must be greater than or equal to 0.")
+
+ if quota_config['default_hard_limit'] is not None \
+ and utils.is_size_negative(quota_config['default_hard_limit']):
+ self.module.fail_json(msg="Invalid default_hard_limit provided. "
+ "Must be greater than or equal to 0.")
+
+ if (cap_unit is not None) and not size:
+            self.module.fail_json(msg="cap_unit can only be specified "
+                                      "along with size")
+
+ nas_server = None
+ if nas_server_name or nas_server_id:
+ nas_server = self.get_nas_server(
+ name=nas_server_name, id=nas_server_id)
+
+ obj_pool = None
+ if pool_name or pool_id:
+ obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)
+
+ obj_fs = None
+ obj_fs = self.get_filesystem(name=filesystem_name,
+ id=filesystem_id,
+ obj_nas_server=nas_server)
+
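+        # A non-empty snap schedule name/id is resolved to its id; an
+        # explicitly empty string is kept as "" to request detaching the
+        # schedule in is_modify_required().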
+ self.snap_sch_id = None
+ if snap_schedule_name or snap_schedule_id:
+ snap_schedule_params = {
+ "name": snap_schedule_name,
+ "id": snap_schedule_id
+ }
+ self.snap_sch_id = self.resolve_to_snapschedule_id(snap_schedule_params)
+ elif snap_schedule_name == "" or snap_schedule_id == "":
+ self.snap_sch_id = ""
+
+ if obj_fs:
+ filesystem_details = obj_fs._get_properties()
+ filesystem_id = obj_fs.get_id()
+ to_modify_dict = self.is_modify_required(obj_fs, cap_unit)
+ LOG.info("From Mod Op, to_modify_dict: %s", to_modify_dict)
+
+ if state == 'present' and not filesystem_details:
+ if not filesystem_name:
+ msg_noname = "FileSystem with id {0} is not found, unable to " \
+ "create a FileSystem without a valid " \
+ "filesystem_name".format(filesystem_id)
+ self.module.fail_json(msg=msg_noname)
+
+ if not pool_name and not pool_id:
+ self.module.fail_json(msg="pool_id or pool_name is required "
+ "to create new filesystem")
+ if not size:
+ self.module.fail_json(msg="Size is required to create"
+ " a filesystem")
+ size = utils.get_size_bytes(size, cap_unit)
+
+ obj_fs = self.create_filesystem(name=filesystem_name,
+ obj_pool=obj_pool,
+ obj_nas_server=nas_server,
+ size=size)
+
+ LOG.debug("Successfully created filesystem , %s", obj_fs)
+ filesystem_id = obj_fs.id
+ filesystem_details = obj_fs._get_properties()
+ to_modify_dict = self.is_modify_required(obj_fs, cap_unit)
+ LOG.debug("Got filesystem id , %s", filesystem_id)
+ changed = True
+
+ if state == 'present' and filesystem_details and to_modify_dict:
+ self.modify_filesystem(update_dict=to_modify_dict, obj_fs=obj_fs)
+ changed = True
+
+ """
+ Set quota configuration
+ """
+ if state == "present" and filesystem_details and quota_config:
+ quota_config_obj = self.get_quota_config_details(obj_fs)
+
+ if quota_config_obj is not None:
+ is_quota_config_modified = self.modify_quota_config(
+ quota_config_obj=quota_config_obj,
+ quota_config_params=quota_config)
+
+ if is_quota_config_modified:
+ changed = True
+ else:
+ self.module.fail_json(msg="One or more operations related"
+ " to this task failed because the"
+                                          " newly created object could not be fetched."
+                                          " Please rerun the task to get the expected result.")
+
+ if state == 'present' and filesystem_details and replication_state is not None:
+ if replication_state == 'enable':
+ changed = self.enable_replication(obj_fs, replication_params)
+ else:
+ changed = self.disable_replication(obj_fs, replication_params)
+
+ if state == 'absent' and filesystem_details:
+ changed = self.delete_filesystem(filesystem_id)
+ filesystem_details = None
+
+ if state == 'present' and filesystem_details:
+ filesystem_details = self.get_filesystem_display_attributes(
+ obj_fs=obj_fs)
+
+ result['changed'] = changed
+ result['filesystem_details'] = filesystem_details
+ self.module.exit_json(**result)
+
+
+def get_time_in_seconds(time, time_units):
+    """Converts the given time to seconds based on the specified unit"""
+ min_in_sec = 60
+ hour_in_sec = 60 * 60
+ day_in_sec = 24 * 60 * 60
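+    # e.g. get_time_in_seconds(2, 'days') returns 172800; non-positive or
+    # missing time values fall through to 0.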
+ if time is not None and time > 0:
+        if time_units == 'minutes':
+            return time * min_in_sec
+        elif time_units == 'hours':
+            return time * hour_in_sec
+        elif time_units == 'days':
+            return time * day_in_sec
+ else:
+ return time
+ else:
+ return 0
+
+
+def get_time_with_unit(time):
+ """This method sets seconds in minutes, hours or days."""
+ sec_in_min = 60
+ sec_in_hour = 60 * 60
+ sec_in_day = 24 * 60 * 60
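+    # e.g. 172800 seconds -> "2.0 days", 7200 -> "2.0 hours",
+    # 90 -> "1.5 minutes" (true division, so the value may be fractional)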
+
+ if time % sec_in_day == 0:
+ time = time / sec_in_day
+ unit = 'days'
+
+ elif time % sec_in_hour == 0:
+ time = time / sec_in_hour
+ unit = 'hours'
+
+ else:
+ time = time / sec_in_min
+ unit = 'minutes'
+ return "%s %s" % (time, unit)
+
+
+def get_replication_args_list(replication_params):
+ """Returns the replication args for payload"""
+ replication_args_list = {
+ 'dst_pool_id': replication_params['destination_pool_id']
+ }
+
+ if replication_params['replication_name']:
+ replication_args_list['replication_name'] = replication_params['replication_name']
+
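+    # replication_mode maps onto max_time_out_of_sync: the rpo value for
+    # asynchronous, 0 for synchronous and -1 for manual.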
+ if 'replication_mode' in replication_params:
+ if replication_params['replication_mode'] == 'asynchronous':
+ replication_args_list['max_time_out_of_sync'] = replication_params['rpo']
+ elif replication_params['replication_mode'] == 'synchronous':
+ replication_args_list['max_time_out_of_sync'] = 0
+ else:
+ replication_args_list['max_time_out_of_sync'] = -1
+
+ return replication_args_list
+
+
+def get_filesystem_parameters():
+    """This method provides the parameters required for the Ansible
+    filesystem module on Unity"""
+ return dict(
+ filesystem_name=dict(required=False, type='str'),
+ filesystem_id=dict(required=False, type='str'),
+ nas_server_name=dict(required=False, type='str'),
+ nas_server_id=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ pool_name=dict(required=False, type='str'),
+ pool_id=dict(required=False, type='str'),
+ size=dict(required=False, type='int'),
+ cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
+ is_thin=dict(required=False, type='bool'),
+ data_reduction=dict(required=False, type='bool'),
+ supported_protocols=dict(required=False, type='str',
+ choices=['NFS', 'CIFS', 'MULTIPROTOCOL']),
+ smb_properties=dict(type='dict', options=dict(
+ is_smb_sync_writes_enabled=dict(type='bool'),
+ is_smb_notify_on_access_enabled=dict(type='bool'),
+ is_smb_op_locks_enabled=dict(type='bool'),
+ is_smb_notify_on_write_enabled=dict(type='bool'),
+ smb_notify_on_change_dir_depth=dict(type='int')
+ )),
+ access_policy=dict(required=False, type='str',
+ choices=['NATIVE', 'UNIX', 'WINDOWS']),
+ locking_policy=dict(required=False, type='str',
+ choices=['ADVISORY', 'MANDATORY']),
+ tiering_policy=dict(required=False, type='str', choices=[
+ 'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
+ snap_schedule_name=dict(required=False, type='str'),
+ snap_schedule_id=dict(required=False, type='str'),
+ quota_config=dict(required=False, type='dict', options=dict(
+ grace_period=dict(required=False, type='int'),
+ grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']),
+ default_hard_limit=dict(required=False, type='int'),
+ default_soft_limit=dict(required=False, type='int'),
+ is_user_quota_enabled=dict(required=False, type='bool'),
+ quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']),
+ cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
+ ), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]),
+ replication_params=dict(type='dict', options=dict(
+ replication_name=dict(type='str'),
+ new_replication_name=dict(type='str'),
+ replication_type=dict(type='str', choices=['local', 'remote']),
+ replication_mode=dict(type='str',
+ choices=['synchronous', 'asynchronous',
+ 'manual']),
+ rpo=dict(type='int'),
+ remote_system=dict(type='dict',
+ options=dict(
+ remote_system_host=dict(type='str', required=True),
+ remote_system_verifycert=dict(type='bool', required=False,
+ default=True),
+ remote_system_username=dict(type='str', required=True),
+ remote_system_password=dict(type='str', required=True, no_log=True),
+ remote_system_port=dict(type='int', required=False, default=443)
+ )),
+ destination_pool_name=dict(type='str'),
+ destination_pool_id=dict(type='str')
+ )),
+ replication_state=dict(type='str', choices=['enable', 'disable']),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity FileSystem object and perform action on it
+ based on user input from playbook"""
+ obj = Filesystem()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
new file mode 100644
index 000000000..35e536a47
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
@@ -0,0 +1,769 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Filesystem Snapshots on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: filesystem_snapshot
+short_description: Manage filesystem snapshot on the Unity storage system
+description:
+- Managing filesystem snapshots on the Unity storage system includes
+  creating, getting details of, modifying and deleting filesystem
+  snapshots.
+version_added: '1.1.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+options:
+ snapshot_name:
+ description:
+ - The name of the filesystem snapshot.
+ - Mandatory parameter for creating a filesystem snapshot.
+ - For all other operations either I(snapshot_name) or I(snapshot_id)
+ is required.
+ type: str
+ snapshot_id:
+ description:
+    - During creation, I(snapshot_id) is auto generated.
+ - For all other operations either I(snapshot_id) or I(snapshot_name)
+ is required.
+ type: str
+ filesystem_name:
+ description:
+ - The name of the Filesystem for which snapshot is created.
+ - For creation of filesystem snapshot either I(filesystem_name) or
+ I(filesystem_id) is required.
+ - Not required for other operations.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the Filesystem for which snapshot is created.
+ - For creation of filesystem snapshot either I(filesystem_id) or
+ I(filesystem_name) is required.
+ - Not required for other operations.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the Filesystem is created.
+ - For creation of filesystem snapshot either I(nas_server_name) or
+ I(nas_server_id) is required.
+ - Not required for other operations.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the Filesystem is created.
+    - For creation of filesystem snapshot either I(nas_server_id) or
+      I(nas_server_name) is required.
+ - Not required for other operations.
+ type: str
+ auto_delete:
+ description:
+ - This option specifies whether or not the filesystem snapshot will be
+ automatically deleted.
+ - If set to C(true), the filesystem snapshot will expire based on the pool
+ auto deletion policy.
+ - If set to C(false), the filesystem snapshot will not be auto deleted
+ based on the pool auto deletion policy.
+    - Option I(auto_delete) cannot be set to C(true) if I(expiry_time) is specified.
+ - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned
+ then the filesystem snapshot will be created keeping I(auto_delete) as
+ C(true).
+ - Once the I(expiry_time) is set, then the filesystem snapshot cannot be
+ assigned to the auto delete policy.
+ type: bool
+ expiry_time:
+ description:
+ - This option is for specifying the date and time after which the
+ filesystem snapshot will expire.
+ - The time is to be mentioned in UTC timezone.
+    - The format is "MM/DD/YYYY HH:MM". The year must have 4 digits.
+ type: str
+ description:
+ description:
+ - The additional information about the filesystem snapshot can be
+ provided using this option.
+ - The description can be removed by passing an empty string.
+ type: str
+ fs_access_type:
+ description:
+ - Access type of the filesystem snapshot.
+ - Required only during creation of filesystem snapshot.
+ - If not given, snapshot's access type will be C(Checkpoint).
+ type: str
+ choices: ['Checkpoint' , 'Protocol']
+ state:
+ description:
+ - The state option is used to mention the existence of the filesystem
+ snapshot.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+notes:
+  - Filesystem snapshot cannot be deleted if it has an nfs or smb share.
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Create Filesystem Snapshot
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ filesystem_name: "ansible_test_FS"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ auto_delete: True
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Create Filesystem Snapshot with expiry time
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap_1"
+ filesystem_name: "ansible_test_FS_1"
+ nas_server_name: "lglad069"
+ description: "Created using playbook"
+ expiry_time: "04/15/2021 2:30"
+ fs_access_type: "Protocol"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ description: "Description updated"
+ auto_delete: False
+ expiry_time: "04/15/2021 5:30"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ expiry_time: "04/18/2021 8:30"
+ state: "present"
+
+ - name: Delete Filesystem Snapshot using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "ansible_test_FS_snap"
+ state: "absent"
+
+ - name: Delete Filesystem Snapshot using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_id: "10008000403"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+filesystem_snapshot_details:
+ description: Details of the filesystem snapshot.
+ returned: When filesystem snapshot exists
+ type: dict
+ contains:
+ access_type:
+ description: Access type of filesystem snapshot.
+ type: str
+ attached_wwn:
+ description: Attached WWN details.
+ type: str
+ creation_time:
+ description: Creation time of filesystem snapshot.
+ type: str
+ creator_schedule:
+ description: Creator schedule of filesystem snapshot.
+ type: str
+ creator_type:
+ description: Creator type for filesystem snapshot.
+ type: str
+ creator_user:
+ description: Creator user for filesystem snapshot.
+ type: str
+ description:
+ description: Description of the filesystem snapshot.
+ type: str
+ expiration_time:
+ description: Date and time after which the filesystem snapshot
+ will expire.
+ type: str
+ is_auto_delete:
+      description: Whether the filesystem snapshot is auto deleted or not.
+ type: bool
+ id:
+ description: Unique identifier of the filesystem snapshot
+ instance.
+ type: str
+ name:
+ description: The name of the filesystem snapshot.
+ type: str
+ size:
+ description: Size of the filesystem snapshot.
+ type: int
+ filesystem_name:
+ description: Name of the filesystem for which the snapshot exists.
+ type: str
+ filesystem_id:
+ description: Id of the filesystem for which the snapshot exists.
+ type: str
+ nas_server_name:
+ description: Name of the NAS server on which filesystem exists.
+ type: str
+ nas_server_id:
+ description: Id of the NAS server on which filesystem exists.
+ type: str
+ sample: {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "attached_wwn": null,
+ "creation_time": "2022-10-21 04:42:53.951000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM",
+ "creator_user": {
+ "id": "user_admin"
+ },
+ "description": "Created using playbook",
+ "existed": true,
+ "expiration_time": null,
+ "filesystem_id": "fs_137",
+ "filesystem_name": "test",
+ "hash": 8739894572587,
+ "host_access": null,
+ "id": "171798721695",
+ "io_limit_policy": null,
+ "is_auto_delete": true,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": false,
+ "last_writable_time": null,
+ "lun": null,
+ "name": "test_FS_snap_1",
+ "nas_server_id": "nas_1",
+ "nas_server_name": "lglad072",
+ "parent_snap": null,
+ "size": 107374182400,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY"
+ }
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+from datetime import datetime
+
+LOG = utils.get_logger('filesystem_snapshot')
+
+application_type = "Ansible/1.6.0"
+
+
+class FilesystemSnapshot(object):
+ """Class with Filesystem Snapshot operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_snapshot_parameters())
+
+ mutually_exclusive = [['snapshot_name', 'snapshot_id'],
+ ['filesystem_name', 'filesystem_id'],
+ ['nas_server_name', 'nas_server_id']]
+
+ required_one_of = [['snapshot_name', 'snapshot_id']]
+ # initialize the ansible module
+ self.module = AnsibleModule(argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ # result is a dictionary that contains changed status and
+ # filesystem snapshot details
+ self.result = {"changed": False,
+ 'filesystem_snapshot_details': {}}
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ self.snap_obj = utils.snap.UnitySnap(self.unity_conn)
+ LOG.info('Connection established with the Unity Array')
+
+ def validate_expiry_time(self, expiry_time):
+ """Validates the specified expiry_time"""
+ try:
+ datetime.strptime(expiry_time, '%m/%d/%Y %H:%M')
+ except ValueError:
+ error_msg = ("expiry_time: %s, not in MM/DD/YYYY HH:MM format." %
+ expiry_time)
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def to_update(self, fs_snapshot, description=None, auto_del=None,
+ expiry_time=None, fs_access_type=None):
+ """Determines whether to update the snapshot or not"""
+ snap_modify_dict = dict()
+
+ if fs_access_type and fs_access_type != fs_snapshot.access_type:
+ error_message = "Modification of access type is not allowed."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ # If the snapshot has is_auto_delete True,
+ # Check if auto_delete in the input is either None or True
+ if expiry_time and fs_snapshot.is_auto_delete \
+ and (auto_del is None or auto_del):
+            self.module.fail_json(msg="expiry_time can only be assigned"
+                                      " when auto_delete is False.")
+ if auto_del is not None:
+ if fs_snapshot.expiration_time:
+                error_msg = "expiry_time for the filesystem snapshot is" \
+                            " set. Once it is set, the snapshot cannot" \
+                            " be assigned to the auto_delete policy."
+ self.module.fail_json(msg=error_msg)
+ if auto_del != fs_snapshot.is_auto_delete:
+ snap_modify_dict['is_auto_delete'] = auto_del
+
+ if description is not None and description != fs_snapshot.description:
+ snap_modify_dict['description'] = description
+
+ if to_update_expiry_time(fs_snapshot, expiry_time):
+ snap_modify_dict['expiry_time'] = expiry_time
+ LOG.info("Snapshot modification details: %s", snap_modify_dict)
+ return snap_modify_dict
+
+ def update_filesystem_snapshot(self, fs_snapshot, snap_modify_dict):
+ try:
+ duration = None
+ if 'expiry_time' in snap_modify_dict \
+ and snap_modify_dict['expiry_time']:
+ duration = convert_timestamp_to_sec(
+ snap_modify_dict['expiry_time'],
+ self.unity_conn.system_time)
+ if duration and duration <= 0:
+ self.module.fail_json(msg="expiry_time should be after"
+ " the current system time.")
+ if 'is_auto_delete' in snap_modify_dict \
+ and snap_modify_dict['is_auto_delete'] is not None:
+ auto_delete = snap_modify_dict['is_auto_delete']
+ else:
+ auto_delete = None
+ if 'description' in snap_modify_dict \
+ and (snap_modify_dict['description']
+ or len(snap_modify_dict['description']) == 0):
+ description = snap_modify_dict['description']
+ else:
+ description = None
+
+ fs_snapshot.modify(retentionDuration=duration,
+ isAutoDelete=auto_delete,
+ description=description)
+ fs_snapshot.update()
+ except Exception as e:
+ error_msg = "Failed to modify filesystem snapshot" \
+ " [name: %s , id: %s] with error %s."\
+ % (fs_snapshot.name, fs_snapshot.id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def create_filesystem_snapshot(self, snap_name, storage_id,
+ description=None, auto_del=None,
+ expiry_time=None, fs_access_type=None):
+ try:
+ duration = None
+ if expiry_time:
+ duration = convert_timestamp_to_sec(
+ expiry_time, self.unity_conn.system_time)
+ if duration <= 0:
+ self.module.fail_json(msg="expiry_time should be after"
+ " the current system time.")
+
+ fs_snapshot = self.snap_obj.create(
+ cli=self.unity_conn._cli, storage_resource=storage_id,
+ name=snap_name, description=description,
+ is_auto_delete=auto_del, retention_duration=duration,
+ fs_access_type=fs_access_type)
+ return fs_snapshot
+ except Exception as e:
+ error_msg = "Failed to create filesystem snapshot" \
+ " %s with error %s" % (snap_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def is_snap_has_share(self, fs_snap):
+ try:
+ obj = self.unity_conn.get_nfs_share(snap=fs_snap) or \
+ self.unity_conn.get_cifs_share(snap=fs_snap)
+ if len(obj) > 0:
+                LOG.info("Snapshot has %s nfs/smb share(s)", len(obj))
+ return True
+ except Exception as e:
+ msg = "Failed to get nfs/smb share from filesystem snapshot. " \
+ "error: %s" % str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ return False
+
+ def delete_fs_snapshot(self, fs_snapshot):
+ try:
+ # Checking whether nfs/smb share created from fs_snapshot
+ if self.is_snap_has_share(fs_snapshot):
+ msg = "Filesystem snapshot cannot be deleted because it has " \
+ "nfs/smb share"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ fs_snapshot.delete()
+ return None
+
+ except Exception as e:
+ error_msg = "Failed to delete filesystem snapshot" \
+ " [name: %s, id: %s] with error %s." \
+ % (fs_snapshot.name, fs_snapshot.id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_fs_snapshot_obj(self, name=None, id=None):
+ fs_snapshot = id if id else name
+ msg = "Failed to get details of filesystem snapshot %s with error %s."
+ try:
+ fs_snap_obj = self.unity_conn.get_snap(name=name, _id=id)
+ if fs_snap_obj and fs_snap_obj.existed:
+ LOG.info("Successfully got the filesystem snapshot object "
+ "%s.", fs_snap_obj)
+ else:
+ fs_snap_obj = None
+ return fs_snap_obj
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+                cred_err = ("Incorrect username or password, %s" % e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = msg % (fs_snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = msg % (fs_snapshot, str(e))
+ LOG.error(err_msg)
+ return None
+
+ except Exception as e:
+ err_msg = msg % (fs_snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_filesystem_obj(self, nas_server=None, name=None, id=None):
+ filesystem = id if id else name
+ try:
+ obj_fs = None
+ if name:
+ if not nas_server:
+ err_msg = "NAS Server is required to get the FileSystem."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ obj_fs = self.unity_conn.get_filesystem(name=name,
+ nas_server=nas_server)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ if id:
+ if nas_server:
+ obj_fs = self.unity_conn\
+ .get_filesystem(id=id, nas_server=nas_server)
+ else:
+ obj_fs = self.unity_conn.get_filesystem(id=id)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ except Exception as e:
+ error_msg = "Failed to get filesystem %s with error %s."\
+ % (filesystem, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server_obj(self, name=None, id=None):
+ nas_server = id if id else name
+ error_msg = ("Failed to get NAS server %s." % nas_server)
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+            if obj_nas and obj_nas.existed:
+ LOG.info("Successfully got the NAS server object %s.",
+ obj_nas)
+ return obj_nas
+ else:
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+ except Exception as e:
+ error_msg = "Failed to get NAS server %s with error %s."\
+ % (nas_server, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def create_fs_snapshot_details_dict(self, fs_snapshot):
+ """ Add name and id of storage resource to filesystem snapshot
+ details """
+
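+        # Replace the raw storage_resource property with explicit
+        # filesystem and NAS server name/id fields for readability.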
+ snapshot_dict = fs_snapshot._get_properties()
+ del snapshot_dict['storage_resource']
+
+ snapshot_dict['filesystem_name'] = fs_snapshot.storage_resource.name
+ snapshot_dict['filesystem_id'] = fs_snapshot.storage_resource.filesystem.id
+
+ obj_fs = self.unity_conn.\
+ get_filesystem(id=fs_snapshot.storage_resource.filesystem.id)
+ if obj_fs and obj_fs.existed:
+ snapshot_dict['nas_server_name'] = obj_fs.nas_server[0].name
+ snapshot_dict['nas_server_id'] = obj_fs.nas_server[0].id
+
+ return snapshot_dict
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on snapshot module based on parameters
+ chosen in playbook
+ """
+ snapshot_name = self.module.params['snapshot_name']
+ snapshot_id = self.module.params['snapshot_id']
+ filesystem_name = self.module.params['filesystem_name']
+ filesystem_id = self.module.params['filesystem_id']
+ nas_server_name = self.module.params['nas_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ auto_delete = self.module.params['auto_delete']
+ expiry_time = self.module.params['expiry_time']
+ description = self.module.params['description']
+ fs_access_type = self.module.params['fs_access_type']
+ state = self.module.params['state']
+ nas_server_resource = None
+ filesystem_resource = None
+ changed = False
+
+ LOG.info("Getting Filesystem Snapshot details.")
+ fs_snapshot = self.get_fs_snapshot_obj(name=snapshot_name,
+ id=snapshot_id)
+
+ msg = "Filesystem Snapshot details: %s." % str(fs_snapshot)
+ LOG.info(msg)
+
+ # Get NAS server Object
+ if nas_server_name is not None:
+ if nas_server_name == "" or nas_server_name.isspace():
+ self.module.fail_json(msg="Invalid nas_server_name given,"
+                                          " please provide a valid name.")
+ nas_server_resource = self\
+ .get_nas_server_obj(name=nas_server_name)
+ elif nas_server_id is not None:
+ if nas_server_id == "" or nas_server_id.isspace():
+                                          " please provide a valid ID.")
+ " Please provide a valid ID.")
+ nas_server_resource = self.get_nas_server_obj(id=nas_server_id)
+
+ # Get Filesystem Object
+ if filesystem_name is not None:
+ if filesystem_name == "" or filesystem_name.isspace():
+ self.module.fail_json(msg="Invalid filesystem_name given,"
+                                          " please provide a valid name.")
+ filesystem_resource = self\
+ .get_filesystem_obj(nas_server=nas_server_resource,
+ name=filesystem_name)
+ fs_res_id = filesystem_resource.storage_resource.id
+ elif filesystem_id is not None:
+ if filesystem_id == "" or filesystem_id.isspace():
+ self.module.fail_json(msg="Invalid filesystem_id given,"
+                                          " please provide a valid ID.")
+ filesystem_resource = self\
+ .get_filesystem_obj(id=filesystem_id)
+ fs_res_id = filesystem_resource[0].storage_resource.id
+
+ # Check for error, if user tries to create a filesystem snapshot
+ # with the same name.
+ if fs_snapshot and filesystem_resource and \
+ (fs_snapshot.storage_resource.id
+ != fs_res_id):
+ self.module.fail_json(
+ msg="Snapshot %s is of %s storage resource. Cannot create new"
+ " snapshot with same name for %s storage resource."
+ % (fs_snapshot.name, fs_snapshot.storage_resource.name,
+ filesystem_resource.storage_resource.name))
+
+ # check for valid expiry_time
+ if expiry_time is not None and \
+ (expiry_time == "" or expiry_time.isspace()):
+ self.module.fail_json(msg="Please provide valid expiry_time,"
+ " empty expiry_time given.")
+ if expiry_time:
+ self.validate_expiry_time(expiry_time)
+
+ # Check if in input auto_delete is True and expiry_time is not None
+ if expiry_time and auto_delete:
+ error_msg = "Cannot set expiry_time if auto_delete given as True."
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ # check for fs_access_type
+ if fs_access_type is not None:
+ if (fs_access_type == "" or fs_access_type.isspace()):
+ self.module.fail_json(msg="Please provide valid "
+ "fs_access_type, empty "
+ "fs_access_type given.")
+ if fs_access_type == "Checkpoint":
+ fs_access_type = utils.FilesystemSnapAccessTypeEnum.CHECKPOINT
+ elif fs_access_type == "Protocol":
+ fs_access_type = utils.FilesystemSnapAccessTypeEnum.PROTOCOL
+
+ # Check whether to modify the filesystem snapshot or not
+ fs_snap_modify_dict = dict()
+ if state == 'present' and fs_snapshot:
+ fs_snap_modify_dict = self\
+ .to_update(fs_snapshot, description=description,
+ auto_del=auto_delete, expiry_time=expiry_time,
+ fs_access_type=fs_access_type)
+
+ # Create Filesystem Snapshot
+ if not fs_snapshot and state == "present":
+ LOG.info("Creating the filesystem snapshot.")
+
+ if snapshot_id:
+ self.module.fail_json(msg="Creation of Filesystem Snapshot is"
+                                          " allowed using snapshot_name only;"
+                                          " snapshot_id was given.")
+ if snapshot_name == "" or snapshot_name.isspace():
+ self.module.fail_json(msg="snapshot_name is required for"
+ " creation of the filesystem"
+ " snapshot, empty snapshot_name"
+ " given.")
+ if not filesystem_resource:
+ self.module.fail_json(msg="filesystem_name or filesystem_id"
+ " required to create a snapshot.")
+
+ fs_snapshot = self.create_filesystem_snapshot(
+ snapshot_name,
+ fs_res_id,
+ description,
+ auto_delete,
+ expiry_time,
+ fs_access_type)
+ changed = True
+
+ # Update the Snapshot
+ if fs_snapshot and state == "present" and fs_snap_modify_dict:
+ LOG.info("Updating the Filesystem Snapshot.")
+ self.update_filesystem_snapshot(fs_snapshot, fs_snap_modify_dict)
+ changed = True
+
+ # Delete the Filesystem Snapshot
+ if state == "absent" and fs_snapshot:
+ fs_snapshot = self.delete_fs_snapshot(fs_snapshot)
+ changed = True
+
+ # Add filesystem snapshot details to the result.
+ if fs_snapshot:
+ fs_snapshot.update()
+ self.result["filesystem_snapshot_details"] = \
+ self.create_fs_snapshot_details_dict(fs_snapshot)
+ else:
+ self.result["filesystem_snapshot_details"] = {}
+
+ self.result["changed"] = changed
+ self.module.exit_json(**self.result)
+
+
+def to_update_expiry_time(fs_snapshot, expiry_time=None):
+ """ Check whether to update expiry_time or not"""
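+    # A delta of 0 from convert_timestamp_to_sec means the requested and
+    # current expiration fall in the same minute, so no update is needed.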
+ if not expiry_time:
+ return False
+ if fs_snapshot.expiration_time is None:
+ return True
+ if convert_timestamp_to_sec(expiry_time, fs_snapshot.expiration_time)\
+ != 0:
+ return True
+ return False
+
+
+def convert_timestamp_to_sec(expiry_time, snap_time):
+ """Converts the time difference to seconds"""
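+    # Both timestamps are truncated to minute precision before taking the
+    # difference, so sub-minute drift is ignored.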
+ snap_time_str = snap_time.strftime('%m/%d/%Y %H:%M')
+ snap_timestamp = datetime.strptime(snap_time_str, '%m/%d/%Y %H:%M')
+ expiry_timestamp = datetime.strptime(expiry_time, "%m/%d/%Y %H:%M")
+ return int((expiry_timestamp - snap_timestamp).total_seconds())
+
+
+def get_snapshot_parameters():
+    """This method provides the parameters required for the Ansible
+    filesystem snapshot module on Unity"""
+ return dict(
+ snapshot_name=dict(required=False, type='str'),
+ snapshot_id=dict(required=False, type='str'),
+ filesystem_name=dict(required=False, type='str'),
+ filesystem_id=dict(required=False, type='str'),
+ nas_server_name=dict(required=False, type='str'),
+ nas_server_id=dict(required=False, type='str'),
+ auto_delete=dict(required=False, type='bool'),
+ expiry_time=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ fs_access_type=dict(required=False, type='str',
+ choices=['Checkpoint', 'Protocol']),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity Filesystem Snapshot object and perform actions on it
+ based on user input from playbook"""
+ obj = FilesystemSnapshot()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/host.py b/ansible_collections/dellemc/unity/plugins/modules/host.py
new file mode 100644
index 000000000..21a5fbae1
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/host.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing host on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: host
+
+version_added: '1.1.0'
+
+short_description: Manage Host operations on Unity
+
+description:
+- The Host module contains the operations
+ Creation of a Host,
+ Addition of initiators to Host,
+ Removal of initiators from Host,
+ Modification of host attributes,
+ Get details of a Host,
+ Deletion of a Host,
+ Addition of network address to Host,
+ Removal of network address from Host.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+
+options:
+ host_name:
+ description:
+ - Name of the host.
+ - Mandatory for host creation.
+ type: str
+
+ host_id:
+ description:
+ - Unique identifier of the host.
+ - Host Id is auto generated during creation.
+    - Except for create, all other operations require either I(host_id) or I(host_name).
+ type: str
+
+ description:
+ description:
+ - Host description.
+ type: str
+
+ host_os:
+ description:
+ - Operating system running on the host.
+ choices: ['AIX', 'Citrix XenServer', 'HP-UX', 'IBM VIOS', 'Linux',
+ 'Mac OS', 'Solaris', 'VMware ESXi', 'Windows Client', 'Windows Server']
+ type: str
+
+ new_host_name:
+ description:
+ - New name for the host.
+ - Only required in rename host operation.
+ type: str
+
+ initiators:
+ description:
+ - List of initiators to be added/removed to/from host.
+ type: list
+ elements: str
+
+ initiator_state:
+ description:
+ - State of the initiator.
+    choices: [present-in-host, absent-in-host]
+ type: str
+
+ network_address:
+ description:
+ - Network address to be added/removed to/from the host.
+    - Enter a valid IPv4 address or host name.
+ type: str
+
+ network_address_state:
+ description:
+ - State of the Network address.
+    choices: [present-in-host, absent-in-host]
+ type: str
+
+ state:
+ description:
+ - State of the host.
+    choices: [present, absent]
+ type: str
+ required: true
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Create empty Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host"
+ host_os: "Linux"
+ description: "ansible-test-host"
+ state: "present"
+
+- name: Create Host with Initiators
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-1"
+ host_os: "Linux"
+ description: "ansible-test-host-1"
+ initiators:
+ - "iqn.1994-05.com.redhat:c38e6e8cfd81"
+ - "20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D"
+ initiator_state: "present-in-host"
+ state: "present"
+
+- name: Modify Host using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ new_host_name: "ansible-test-host-2"
+ host_os: "Mac OS"
+ description: "Ansible tesing purpose"
+ state: "present"
+
+- name: Add Initiators to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ initiators:
+ - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C"
+ initiator_state: "present-in-host"
+ state: "present"
+
+- name: Get Host details using host_name
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "present"
+
+- name: Get Host details using host_id
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_id: "Host_253"
+ state: "present"
+
+- name: Delete Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "ansible-test-host-2"
+ state: "absent"
+
+- name: Add network address to Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "present-in-host"
+ state: "present"
+
+- name: Delete network address from Host
+ dellemc.unity.host:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ host_name: "{{host_name}}"
+ network_address: "192.168.1.2"
+ network_address_state: "absent-in-host"
+ state: "present"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+host_details:
+ description: Details of the host.
+ returned: When host exists.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ description:
+ description: Description about the host.
+ type: str
+ fc_host_initiators:
+ description: Details of the FC initiators associated with
+ the host.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the FC initiator path.
+ type: str
+ name:
+ description: FC Qualified Name (WWN) of the initiator.
+ type: str
+ paths:
+ description: Details of the paths associated with the FC initiator.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the path.
+ type: str
+ is_logged_in:
+ description: Indicates whether the host initiator is logged into the storage system.
+ type: bool
+ iscsi_host_initiators:
+ description: Details of the ISCSI initiators associated
+ with the host.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the ISCSI initiator path.
+ type: str
+ name:
+ description: ISCSI Qualified Name (IQN) of the initiator.
+ type: str
+ paths:
+ description: Details of the paths associated with the ISCSI initiator.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the path.
+ type: str
+ is_logged_in:
+ description: Indicates whether the host initiator is logged into the storage system.
+ type: bool
+ network_addresses:
+ description: List of network addresses mapped to the host.
+ type: list
+ os_type:
+ description: Operating system running on the host.
+ type: str
+ type:
+ description: HostTypeEnum of the host.
+ type: str
+ host_luns:
+ description: Details of luns attached to host.
+ type: list
+ sample: {
+ "auto_manage_type": "HostManageEnum.UNKNOWN",
+ "datastores": null,
+ "description": "ansible-test-host-1",
+ "existed": true,
+ "fc_host_initiators": [
+ {
+ "id": "HostInitiator_1",
+ "name": "HostName_1",
+ "paths": [
+ {
+ "id": "HostInitiator_1_Id1",
+ "is_logged_in": true
+ },
+ {
+ "id": "HostInitiator_1_Id2",
+ "is_logged_in": true
+ }
+ ]
+ }
+ ],
+ "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "health": {
+ "UnityHealth": {
+ "hash": 8764429420954
+ }
+ },
+ "host_container": null,
+ "host_luns": [],
+ "host_polled_uuid": null,
+ "host_pushed_uuid": null,
+ "host_uuid": null,
+ "host_v_vol_datastore": null,
+ "id": "Host_2198",
+ "iscsi_host_initiators": [
+ {
+ "id": "HostInitiator_2",
+ "name": "HostName_2",
+ "paths": [
+ {
+ "id": "HostInitiator_2_Id1",
+ "is_logged_in": true
+ },
+ {
+ "id": "HostInitiator_2_Id2",
+ "is_logged_in": true
+ }
+ ]
+ }
+ ],
+ "last_poll_time": null,
+ "name": "ansible-test-host-1",
+ "network_addresses": [],
+ "os_type": "Linux",
+ "registration_type": null,
+ "storage_resources": null,
+ "tenant": null,
+ "type": "HostTypeEnum.HOST_MANUAL",
+ "vms": null
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import ipaddress
+
+LOG = utils.get_logger('host')
+
+application_type = "Ansible/1.6.0"
+
+
+class Host(object):
+ """Class with Host operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_host_parameters())
+
+ mutually_exclusive = [['host_name', 'host_id']]
+ required_one_of = [['host_name', 'host_id']]
+ required_together = [['network_address', 'network_address_state']]
+
+ """ initialize the ansible module """
+ self.module = AnsibleModule(argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ self.unity = utils.get_unity_unisphere_connection(self.module.params, application_type)
+ LOG.info('Got the unity instance for provisioning on Unity')
+
+ def get_host_count(self, host_name):
+ """ To get the count of hosts with same host_name """
+
+ hosts = []
+ host_count = 0
+ hosts = utils.host.UnityHostList.get(cli=self.unity._cli,
+ name=host_name)
+ host_count = len(hosts)
+ return host_count
+
+ def get_host_details(self, host_id=None, host_name=None):
+ """ Get details of a given host """
+
+ host_id_or_name = host_id if host_id else host_name
+ try:
+ LOG.info("Getting host %s details", host_id_or_name)
+ if host_id:
+ host_details = self.unity.get_host(_id=host_id)
+ if host_details.name is None:
+ return None
+ if host_name:
+
+ ''' get the count of hosts with same host_name '''
+ host_count = self.get_host_count(host_name)
+
+ if host_count < 1:
+ return None
+ elif host_count > 1:
+ error_message = "Duplicate hosts found: There are "\
+ + host_count + " hosts(s) with the same" \
+ " host_name: " + host_name
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+ else:
+ host_details = self.unity.get_host(name=host_name)
+
+ return host_details
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ msg = 'Incorrect username or password provided.'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = "Got HTTP Connection Error while getting host " \
+ "details %s : Error %s " % (host_id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.UnityResourceNotFoundError as e:
+ error_message = "Failed to get details of host " \
+ "{0} with error {1}".format(host_id_or_name,
+ str(e))
+ LOG.error(error_message)
+ return None
+ except Exception as e:
+ error_message = "Got error %s while getting details of host %s" \
+ % (str(e), host_id_or_name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def create_host(self, host_name):
+ """ Create a new host """
+ try:
+ description = self.module.params['description']
+ host_os = self.module.params['host_os']
+ host_type = utils.HostTypeEnum.HOST_MANUAL
+ initiators = self.module.params['initiators']
+ initiator_state = self.module.params['initiator_state']
+ empty_initiators_flag = False
+
+ if (initiators and initiator_state == 'absent-in-host'):
+ error_message = "Incorrect 'initiator_state' given."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if (initiators is None or len(initiators) == 0
+ or not initiator_state
+ or initiator_state == 'absent-in-host'):
+ empty_initiators_flag = True
+
+ """ if any of the Initiators is invalid or already mapped """
+ if (initiators and initiator_state == 'present-in-host'):
+ unmapped_initiators \
+ = self.get_list_unmapped_initiators(initiators)
+ if unmapped_initiators is None \
+ or len(unmapped_initiators) < len(initiators):
+ error_message = "Provide valid initiators."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+ if not empty_initiators_flag:
+ self.validate_initiators(initiators)
+ LOG.info("Creating empty host %s ", host_name)
+ new_host = utils.host.UnityHost.create(self.unity._cli, name=host_name, desc=description,
+ os=host_os, host_type=host_type)
+ if not empty_initiators_flag:
+ host_details = self.unity.get_host(name=host_name)
+ LOG.info("Adding initiators to %s host", host_name)
+ result, new_host \
+ = self.add_initiator_to_host(host_details, initiators)
+ return True, new_host
+ except Exception as e:
+ error_message = "Got error %s while creation of host %s" \
+ % (str(e), host_name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def validate_initiators(self, initiators):
+ results = []
+ for item in initiators:
+ results.append(utils.is_initiator_valid(item))
+ if False in results:
+ error_message = "One or more initiator provided is not valid, please provide valid initiators"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_host_initiators_list(self, host_details):
+ """ Get the list of existing initiators in host"""
+
+ existing_initiators = []
+ if host_details.fc_host_initiators is not None:
+ fc_len = len(host_details.fc_host_initiators)
+ if fc_len > 0:
+ for count in range(fc_len):
+ """ get initiator 'wwn' id"""
+ ini_id \
+ = host_details.fc_host_initiators.initiator_id[count]
+
+ """ update existing_initiators list with 'wwn' """
+ existing_initiators.append(ini_id)
+
+ if host_details.iscsi_host_initiators is not None:
+ iscsi_len = len(host_details.iscsi_host_initiators)
+ if iscsi_len > 0:
+ for count in range(iscsi_len):
+ """ get initiator 'iqn' id"""
+ ini_id \
+ = host_details.iscsi_host_initiators.\
+ initiator_id[count]
+
+ """ update existing_initiators list with 'iqn' """
+ existing_initiators.append(ini_id)
+ return existing_initiators
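+
+    # Illustrative shape of the list returned above (hypothetical values):
+    # ['20:00:00:90:FA:13:81:8D:10:00:00:90:FA:13:81:8D',  # FC WWN
+    #  'iqn.1994-05.com.redhat:c38e6e8cfd81']              # iSCSI IQN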
+
+ def is_host_modified(self, host_details):
+ """ Determines whether the Host details are to be updated or not """
+ LOG.info("Checking host attribute values.")
+ modified_flag = False
+
+ if (self.module.params['description'] is not None
+ and self.module.params['description']
+ != host_details.description) \
+ or (self.module.params['host_os'] is not None
+ and self.module.params['host_os'] != host_details.os_type) \
+ or (self.module.params['new_host_name'] is not None
+ and self.module.params[
+ 'new_host_name'] != host_details.name) \
+ or (self.module.params['initiators'] is not None
+ and self.module.params['initiators']
+ != self.get_host_initiators_list(host_details)):
+ LOG.info("Modification required.")
+ modified_flag = True
+
+ return modified_flag
+
+ def modify_host(self, host_details, new_host_name=None, description=None,
+ host_os=None):
+ """ Modify a host """
+ try:
+ hosts = utils.host.UnityHostList.get(cli=self.unity._cli)
+ host_names_list = hosts.name
+ for name in host_names_list:
+ if new_host_name == name:
+ error_message = "Cannot modify name, new_host_name: " \
+ + new_host_name + " already in use."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+ host_details.modify(name=new_host_name, desc=description,
+ os=host_os)
+ return True
+
+ except Exception as e:
+ error_message = "Got error %s while modifying host %s" \
+ % (str(e), host_details.name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_list_unmapped_initiators(self, initiators, host_id=None):
+ """ Get the list of those initiators which are
+ not mapped to any host"""
+
+ unmapped_initiators = []
+ for id in initiators:
+ initiator_details = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli, initiator_id=id) \
+ ._get_properties()
+
+ """ if an already existing initiator is passed along with an
+ unmapped initiator"""
+ if None in initiator_details["parent_host"]:
+ unmapped_initiators.append(initiator_details
+ ["initiator_id"][0])
+ elif not initiator_details["parent_host"]:
+ unmapped_initiators.append(id)
+ else:
+ error_message = "Initiator " + id + " mapped to another Host."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+ return unmapped_initiators
+
+ def add_initiator_to_host(self, host_details, initiators):
+ """ Add initiator to host """
+
+ try:
+ existing_initiators = self.get_host_initiators_list(host_details)
+
+ """ if current and exisitng initiators are same"""
+ if initiators \
+ and (set(initiators).issubset(set(existing_initiators))):
+ LOG.info("Initiators are already present in host: %s",
+ host_details.name)
+ return False, host_details
+
+ """ get the list of non-mapped initiators out of the
+ given initiators"""
+ host_id = host_details.id
+ unmapped_initiators \
+ = self.get_list_unmapped_initiators(initiators, host_id)
+
+ """ if any of the Initiators is invalid or already mapped """
+ if unmapped_initiators is None \
+ or len(unmapped_initiators) < len(initiators):
+ error_message = "Provide valid initiators."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ LOG.info("Adding initiators to host %s", host_details.name)
+ for id in unmapped_initiators:
+ host_details.add_initiator(uid=id)
+ updated_host \
+ = self.unity.get_host(name=host_details.name)
+ return True, updated_host
+
+ except Exception as e:
+ error_message = "Got error %s while adding initiator to host %s" \
+ % (str(e), host_details.name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def remove_initiator_from_host(self, host_details, initiators):
+ """ Remove initiator from host """
+
+ try:
+ existing_initiators = self.get_host_initiators_list(host_details)
+
+ if existing_initiators is None:
+ LOG.info("No exisiting initiators in host: %s",
+ host_details.name)
+ return False, host_details
+
+ if not (set(initiators).issubset(set(existing_initiators))):
+ LOG.info("Initiators already absent in host: %s",
+ host_details.name)
+ return False, host_details
+
+ LOG.info("Removing initiators from host %s", host_details.name)
+
+ if len(initiators) > 1:
+ self.check_if_initiators_logged_in(initiators)
+
+ for id in initiators:
+ initiator_details = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli, initiator_id=id) \
+ ._get_properties()
+
+ """ if initiator has no active paths, then remove it """
+ if initiator_details["paths"][0] is None:
+ LOG.info("Initiator Path does not exist.")
+ host_details.delete_initiator(uid=id)
+ updated_host \
+ = self.unity.get_host(name=host_details.name)
+
+ else:
+ """ Checking for initiator logged_in state """
+ for path in initiator_details["paths"][0]["UnityHostInitiatorPathList"]:
+ path_id = path["UnityHostInitiatorPath"]["id"]
+
+ path_id_obj = utils.host.UnityHostInitiatorPathList \
+ .get(cli=self.unity._cli, _id=path_id)
+
+ path_id_details = path_id_obj._get_properties()
+
+ """ if is_logged_in is True, can't remove initiator"""
+ if (path_id_details["is_logged_in"]):
+ error_message = "Cannot remove initiator "\
+ + id + ", as it is logged in " \
+ "the with host."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ elif (not path_id_details["is_logged_in"]):
+ """ if is_logged_in is False, remove initiator """
+ path_id_obj.delete()
+
+ else:
+ """ if logged_in state does not exist """
+ error_message = " logged_in state does not " \
+ "exist for initiator " + id + "."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ host_details.delete_initiator(uid=id)
+ updated_host \
+ = self.unity.get_host(name=host_details.name)
+
+ return True, updated_host
+
+ except Exception as e:
+ error_message = "Got error %s while removing initiator from " \
+ "host %s" \
+ % (str(e), host_details.name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def check_if_initiators_logged_in(self, initiators):
+ """ Checks if any of the initiators is of type logged-in"""
+
+ for item in initiators:
+ initiator_details = (utils.host.UnityHostInitiatorList
+ .get(cli=self.unity._cli, initiator_id=item)
+ ._get_properties())
+ if initiator_details["paths"][0] is not None and "UnityHostInitiatorPathList" in initiator_details["paths"][0]:
+ error_message = "Removal operation cannot be done since host has logged in initiator(s)"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def delete_host(self, host_details):
+ """ Delete an existing host """
+
+ try:
+ host_details.delete()
+ return True
+ except Exception as e:
+ error_message = "Got error %s while deletion of host %s" \
+ % (str(e), host_details.name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_iscsi_host_initiators_details(self, iscsi_host_initiators):
+ """ Get the details of existing ISCSI initiators in host"""
+
+ iscsi_initiator_list = []
+ for iscsi in iscsi_host_initiators:
+ iscsi_initiator_details = self.unity.get_initiator(_id=iscsi.id)
+ iscsi_path_list = []
+ if iscsi_initiator_details.paths is not None:
+ for path in iscsi_initiator_details.paths:
+ iscsi_path_list.append({
+ 'id': path.id,
+ 'is_logged_in': path.is_logged_in
+ })
+ iscsi_initiator_list.append({
+ 'id': iscsi_initiator_details.id,
+ 'name': iscsi_initiator_details.initiator_id,
+ 'paths': iscsi_path_list
+ })
+ return iscsi_initiator_list
+
+ def get_host_network_address_list(self, host_details):
+ network_address_list = []
+ if host_details and host_details.host_ip_ports is not None:
+ for port in host_details.host_ip_ports:
+ network_address_list.append(port.address)
+ return network_address_list
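+
+    # Illustrative return value (hypothetical): a host with one registered IP
+    # port and one host-name port would yield
+    # ['192.168.1.2', 'ansible-test-host'].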
+
+ def manage_network_address(self, host_details, network_address_list,
+ network_address, network_address_state):
+ try:
+ is_mapped = False
+ changed = False
+ for addr in network_address_list:
+ if addr.lower() == network_address.lower():
+ is_mapped = True
+ break
+ if not is_mapped and network_address_state == 'present-in-host':
+ LOG.info("Adding network address %s to Host %s", network_address,
+ host_details.name)
+ host_details.add_ip_port(network_address)
+ changed = True
+ elif is_mapped and network_address_state == 'absent-in-host':
+ LOG.info("Deleting network address %s from Host %s", network_address,
+ host_details.name)
+ host_details.delete_ip_port(network_address)
+ changed = True
+
+ if changed:
+ updated_host = self.unity.get_host(name=host_details.name)
+ network_address_list = self.get_host_network_address_list(updated_host)
+ return network_address_list, changed
+ except Exception as e:
+ error_message = "Got error %s while modifying network address %s of host %s" \
+ % (str(e), network_address, host_details.name)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_host_lun_list(self, host_details):
+ """ Get luns attached to host"""
+ host_luns_list = []
+ if host_details and host_details.host_luns is not None:
+ for lun in host_details.host_luns.lun:
+ host_lun = {"name": lun.name, "id": lun.id}
+ host_luns_list.append(host_lun)
+ return host_luns_list
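+
+    # Illustrative return value (hypothetical values):
+    # [{'name': 'ansible-lun-1', 'id': 'sv_27'}]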
+
+ def get_fc_host_initiators_details(self, fc_host_initiators):
+ """ Get the details of existing FC initiators in host"""
+
+ fc_initiator_list = []
+ for fc in fc_host_initiators:
+ fc_initiator_details = self.unity.get_initiator(_id=fc.id)
+ fc_path_list = []
+ if fc_initiator_details.paths is not None:
+ for path in fc_initiator_details.paths:
+ fc_path_list.append({
+ 'id': path.id,
+ 'is_logged_in': path.is_logged_in
+ })
+ fc_initiator_list.append({
+ 'id': fc_initiator_details.id,
+ 'name': fc_initiator_details.initiator_id,
+ 'paths': fc_path_list
+ })
+ return fc_initiator_list
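+
+    # Illustrative return value, mirroring the fc_host_initiators sample in
+    # RETURN above (hypothetical IDs):
+    # [{'id': 'HostInitiator_1', 'name': '<initiator WWN>',
+    #   'paths': [{'id': 'HostInitiator_1_Id1', 'is_logged_in': True}]}]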
+
+ def perform_module_operation(self):
+ """ Perform different actions on host based on user parameter
+ chosen in playbook """
+
+ host_name = self.module.params['host_name']
+ host_id = self.module.params['host_id']
+ description = self.module.params['description']
+ host_os = self.module.params['host_os']
+ new_host_name = self.module.params['new_host_name']
+ initiator_state = self.module.params['initiator_state']
+ initiators = self.module.params['initiators']
+ network_address = self.module.params['network_address']
+ network_address_state = self.module.params['network_address_state']
+ state = self.module.params['state']
+
+ if host_name and len(host_name) > 255:
+ err_msg = "'host_name' is greater than 255 characters."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if new_host_name and len(new_host_name) > 255:
+ err_msg = "'new_host_name' is greater than 255 characters."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if description and len(description) > 255:
+ err_msg = "'description' is greater than 255 characters."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if not initiators and initiator_state:
+ err_msg = "'initiator_state' is given, " \
+ "'initiators' are not specified"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if not initiator_state and initiators:
+ err_msg = "'initiators' are given, " \
+ "'initiator_state' is not specified"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ # result is a dictionary that contains changed status and
+ # host details
+ result = dict(
+ changed=False,
+ host_details={}
+ )
+
+ ''' Get host details based on host_name/host_id'''
+ host_details = self.get_host_details(host_id, host_name)
+ if not host_details and state == 'present':
+ if host_id:
+ err_msg = "Invalid argument 'host_id' while " \
+ "creating a host"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ if not host_name:
+ err_msg = "host_name is required to create a host"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ if new_host_name:
+ err_msg = "Invalid argument 'new_host_name' while " \
+ "creating a host"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if (initiators and initiator_state == 'absent-in-host'):
+ error_message = "Incorrect 'initiator_state' given."
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ # Create new host
+ LOG.info("Creating host: %s", host_name)
+ result['changed'], host_details \
+ = self.create_host(host_name)
+ result['host_details'] = host_details._get_properties()
+
+ # Modify host (Attributes and ADD/REMOVE Initiators)
+ elif (state == 'present' and host_details):
+ modified_flag = self.is_host_modified(host_details)
+ if modified_flag:
+
+ # Modify host
+ result['changed'] = self.modify_host(host_details,
+ new_host_name,
+ description,
+ host_os)
+ if new_host_name:
+ host_details = self.get_host_details(host_id,
+ new_host_name)
+ else:
+ host_details = self.get_host_details(host_id, host_name)
+ result['host_details'] = host_details._get_properties()
+
+ # Add Initiators to host
+ if (initiator_state == 'present-in-host' and initiators
+ and len(initiators) > 0):
+ LOG.info("Adding Initiators to Host %s",
+ host_details.name)
+ result['changed'], host_details \
+ = self.add_initiator_to_host(host_details, initiators)
+ result['host_details'] = host_details._get_properties()
+
+ else:
+ LOG.info('Host modification is not applicable, '
+ 'as none of the attributes has changed.')
+ result['changed'] = False
+ result['host_details'] = host_details._get_properties()
+
+ # Remove initiators from host
+ if (host_details and initiator_state == 'absent-in-host'
+ and initiators and len(initiators) > 0):
+ LOG.info("Removing Initiators from Host %s",
+ host_details.name)
+ result['changed'], host_details \
+ = self.remove_initiator_from_host(host_details,
+ initiators)
+ result['host_details'] = host_details._get_properties()
+
+ """ display WWN/IQN w.r.t. initiators mapped to host,
+ if host exists """
+ if host_details and host_details.fc_host_initiators is not None:
+ host_details.fc_host_initiators = self.get_fc_host_initiators_details(host_details.fc_host_initiators)
+ result['host_details'] = host_details._get_properties()
+ if host_details and host_details.iscsi_host_initiators is not None:
+ host_details.iscsi_host_initiators = self.get_iscsi_host_initiators_details(host_details.iscsi_host_initiators)
+ result['host_details'] = host_details._get_properties()
+
+ ''' Get host luns details and network addresses'''
+ if result['host_details']:
+ result['host_details']['host_luns'] = self.get_host_lun_list(host_details)
+ result['host_details']['network_addresses'] = self.get_host_network_address_list(host_details)
+ if 'host_ip_ports' in result['host_details']:
+ del result['host_details']['host_ip_ports']
+
+ # manage network address
+ if host_details is not None and network_address_state is not None:
+ self.validate_network_address_params(network_address)
+ network_address_list, changed = self.manage_network_address(
+ host_details,
+ result['host_details']['network_addresses'],
+ network_address,
+ network_address_state)
+ result['host_details']['network_addresses'] = network_address_list
+ result['changed'] = changed
+
+ # Delete a host
+ if state == 'absent':
+ if host_details:
+ LOG.info("Deleting host %s", host_details.name)
+ result['changed'] = self.delete_host(host_details)
+ else:
+ result['changed'] = False
+ result['host_details'] = []
+
+ self.module.exit_json(**result)
+
+ def validate_network_address_params(self, network_address):
+ if '.' in network_address and not is_valid_ip(network_address):
+ err_msg = 'Please enter valid IPV4 address for network address'
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if len(network_address) < 1 or len(network_address) > 63:
+ err_msg = "'network_address' should be in range of 1 to 63 characters."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ if utils.has_special_char(network_address) or ' ' in network_address:
+ err_msg = 'Please enter valid IPV4 address or host name for network address'
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+
+def is_valid_ip(address):
+ try:
+ ipaddress.ip_address(address)
+ return True
+ except ValueError:
+ return False
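+
+# Quick sanity check of is_valid_ip (hypothetical values):
+#   is_valid_ip('192.168.1.2')   -> True
+#   is_valid_ip('300.1.1.1')     -> False (not a valid IPv4/IPv6 address)
+#   is_valid_ip('host.example')  -> False (host names are not IP addresses)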
+
+
+def get_host_parameters():
+ """This method provides parameters required for the ansible host
+ module on Unity"""
+ return dict(
+ host_name=dict(required=False, type='str'),
+ host_id=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ host_os=dict(required=False, type='str',
+ choices=['AIX', 'Citrix XenServer', 'HP-UX',
+ 'IBM VIOS', 'Linux', 'Mac OS', 'Solaris',
+ 'VMware ESXi', 'Windows Client',
+ 'Windows Server']),
+ new_host_name=dict(required=False, type='str'),
+ initiators=dict(required=False, type='list', elements='str'),
+ initiator_state=dict(required=False, type='str',
+ choices=['present-in-host',
+ 'absent-in-host']),
+ network_address=dict(required=False, type='str'),
+ network_address_state=dict(required=False, type='str',
+ choices=['present-in-host',
+ 'absent-in-host']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity host object and perform action on it
+ based on user input from playbook"""
+ obj = Host()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/info.py b/ansible_collections/dellemc/unity/plugins/modules/info.py
new file mode 100644
index 000000000..e89d86335
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/info.py
@@ -0,0 +1,1784 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for Gathering information about Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: info
+
+version_added: '1.1.0'
+
+short_description: Gathering information about Unity
+
+description:
+- Gathering information about Unity storage system includes
+ Get the details of Unity array,
+ Get list of Hosts in Unity array,
+ Get list of FC initiators in Unity array,
+ Get list of iSCSI initiators in Unity array,
+ Get list of Consistency groups in Unity array,
+ Get list of Storage pools in Unity array,
+ Get list of Volumes in Unity array,
+ Get list of Snapshot schedules in Unity array,
+ Get list of NAS servers in Unity array,
+ Get list of File systems in Unity array,
+ Get list of Snapshots in Unity array,
+ Get list of SMB shares in Unity array,
+ Get list of NFS exports in Unity array,
+ Get list of User quotas in Unity array,
+ Get list of Quota tree in Unity array,
+ Get list of NFS Servers in Unity array,
+  Get list of CIFS Servers in Unity array,
+  Get list of Ethernet ports in Unity array,
+ Get list of File interfaces used in Unity array.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Rajshree Khare (@kharer5) <ansible.team@dell.com>
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ gather_subset:
+ description:
+ - List of string variables to specify the Unity storage system entities
+ for which information is required.
+ choices: [host, fc_initiator, iscsi_initiator, cg, storage_pool, vol,
+ snapshot_schedule, nas_server, file_system, snapshot, nfs_export,
+ smb_share, user_quota, tree_quota, disk_group, nfs_server, cifs_server, ethernet_port, file_interface]
+ type: list
+ elements: str
+
+notes:
+ - The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get detailed list of Unity entities
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+ - fc_initiator
+ - iscsi_initiator
+ - cg
+ - storage_pool
+ - vol
+ - snapshot_schedule
+ - nas_server
+ - file_system
+ - snapshot
+ - nfs_export
+ - smb_share
+ - user_quota
+ - tree_quota
+ - disk_group
+ - nfs_server
+ - cifs_server
+ - ethernet_port
+ - file_interface
+
+ - name: Get information of Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+
+ - name: Get list of hosts on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - host
+
+ - name: Get list of FC initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - fc_initiator
+
+ - name: Get list of ISCSI initiators on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - iscsi_initiator
+
+ - name: Get list of consistency groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cg
+
+ - name: Get list of storage pools on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - storage_pool
+
+ - name: Get list of volumes on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - vol
+
+ - name: Get list of snapshot schedules on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot_schedule
+
+ - name: Get list of NAS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nas_server
+
+ - name: Get list of File Systems on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_system
+
+ - name: Get list of Snapshots on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - snapshot
+
+ - name: Get list of NFS exports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_export
+
+ - name: Get list of SMB shares on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - smb_share
+
+ - name: Get list of user quotas on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - user_quota
+
+ - name: Get list of quota trees on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - tree_quota
+
+ - name: Get list of disk groups on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - disk_group
+
+ - name: Get list of NFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - nfs_server
+
+ - name: Get list of CIFS Servers on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - cifs_server
+
+ - name: Get list of ethernet ports on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - ethernet_port
+
+ - name: Get list of file interfaces on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - file_interface
+'''
+
+RETURN = r'''
+Array_Details:
+ description: Details of the Unity Array.
+ returned: always
+ type: dict
+ contains:
+ api_version:
+ description: The current api version of the Unity Array.
+ type: str
+ earliest_api_version:
+ description: The earliest api version of the Unity Array.
+ type: str
+ model:
+ description: The model of the Unity Array.
+ type: str
+ name:
+ description: The name of the Unity Array.
+ type: str
+ software_version:
+ description: The software version of the Unity Array.
+ type: str
+ sample: {
+ "api_version": "12.0",
+ "earliest_api_version": "4.0",
+ "existed": true,
+ "hash": 8766644083532,
+ "id": "0",
+ "model": "Unity 480",
+ "name": "APM00213404195",
+ "software_version": "5.2.1"
+ }
+
+Hosts:
+ description: Details of the hosts.
+ returned: When hosts exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the host.
+ type: str
+ name:
+ description: The name of the host.
+ type: str
+ sample: [
+ {
+ "auto_manage_type": "HostManageEnum.UNKNOWN",
+ "datastores": null,
+ "description": "",
+ "existed": true,
+ "fc_host_initiators": null,
+ "hash": 8762200072289,
+ "health": {
+ "UnityHealth": {
+ "hash": 8762200072352
+ }
+ },
+ "host_container": null,
+ "host_ip_ports": {
+ "UnityHostIpPortList": [
+ {
+ "UnityHostIpPort": {
+ "hash": 8762200072361
+ }
+ }
+ ]
+ },
+ "host_luns": null,
+ "host_polled_uuid": null,
+ "host_pushed_uuid": null,
+ "host_uuid": null,
+ "host_v_vol_datastore": null,
+ "id": "Host_2191",
+ "iscsi_host_initiators": null,
+ "last_poll_time": null,
+ "name": "10.225.2.153",
+ "os_type": "Linux",
+ "registration_type": null,
+ "storage_resources": null,
+ "tenant": null,
+ "type": "HostTypeEnum.HOST_MANUAL",
+ "vms": null
+ }
+ ]
+
+FC_initiators:
+ description: Details of the FC initiators.
+    returned: When FC initiators exist.
+ type: list
+ contains:
+ WWN:
+ description: The WWN of the FC initiator.
+ type: str
+ id:
+ description: The id of the FC initiator.
+ type: str
+ sample: [
+ {
+ "WWN": "20:00:00:0E:1E:E9:B8:FC:21:00:00:0E:1E:E9:B8:FC",
+ "id": "HostInitiator_3"
+ },
+ {
+ "WWN": "20:00:00:0E:1E:E9:B8:F7:21:00:00:0E:1E:E9:B8:F7",
+ "id": "HostInitiator_4"
+ }
+ ]
+
+ISCSI_initiators:
+ description: Details of the ISCSI initiators.
+ returned: When ISCSI initiators exist.
+ type: list
+ contains:
+ IQN:
+ description: The IQN of the ISCSI initiator.
+ type: str
+ id:
+ description: The id of the ISCSI initiator.
+ type: str
+ sample: [
+ {
+ "IQN": "iqn.1994-05.com.redhat:634d768090f",
+ "id": "HostInitiator_1"
+ },
+ {
+ "IQN": "iqn.1994-05.com.redhat:2835ba62cc6d",
+ "id": "HostInitiator_2"
+ }
+ ]
+
+Consistency_Groups:
+ description: Details of the Consistency Groups.
+ returned: When Consistency Groups exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Consistency Group.
+ type: str
+ name:
+ description: The name of the Consistency Group.
+ type: str
+ sample: [
+ {
+ "advanced_dedup_status": "DedupStatusEnum.DISABLED",
+ "block_host_access": {
+ "UnityBlockHostAccessList": [
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745385821206
+ }
+ },
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745386530115
+ }
+ },
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8745386530124
+ }
+ }
+ ]
+ },
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "data_reduction_status": "DataReductionStatusEnum.DISABLED",
+ "datastores": null,
+ "dedup_status": null,
+ "description": "CG has created with all parametres.",
+ "esx_filesystem_block_size": null,
+ "esx_filesystem_major_version": null,
+ "existed": true,
+ "filesystem": null,
+ "hash": 8745385801328,
+ "health": {
+ "UnityHealth": {
+ "hash": 8745386647098
+ }
+ },
+ "host_v_vol_datastore": null,
+ "id": "res_93",
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "luns": {
+ "UnityLunList": [
+ {
+ "UnityLun": {
+ "hash": 8745389830024,
+ "id": "sv_64"
+ }
+ },
+ {
+ "UnityLun": {
+ "hash": 8745386526751,
+ "id": "sv_63"
+ }
+ }
+ ]
+ },
+ "metadata_size": 8858370048,
+ "metadata_size_allocated": 7516192768,
+ "name": "CG1_Ansible_Test_SS",
+ "per_tier_size_used": [
+ 11811160064,
+ 0,
+ 0
+ ],
+ "pools": {
+ "UnityPoolList": [
+ {
+ "UnityPool": {
+ "hash": 8745386552375,
+ "id": "pool_3"
+ }
+ }
+ ]
+ },
+ "relocation_policy": "TieringPolicyEnum.AUTOTIER",
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 99418112,
+ "size_total": 268435456000,
+ "size_used": null,
+ "snap_count": 1,
+ "snap_schedule": {
+ "UnitySnapSchedule": {
+ "hash": 8745386550224,
+ "id": "snapSch_66"
+ }
+ },
+ "snaps_size_allocated": 8888320,
+ "snaps_size_total": 108675072,
+ "thin_status": "ThinStatusEnum.TRUE",
+ "type": "StorageResourceTypeEnum.CONSISTENCY_GROUP",
+ "virtual_volumes": null,
+ "vmware_uuid": null
+ },
+ ]
+
+Storage_Pools:
+ description: Details of the Storage Pools.
+ returned: When Storage Pools exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Storage Pool.
+ type: str
+ name:
+ description: The name of the Storage Pool.
+ type: str
+ sample: [
+ {
+ "alert_threshold": 70,
+ "creation_time": "2021-10-18 12:45:12+00:00",
+ "description": "",
+ "existed": true,
+ "harvest_state": "UsageHarvestStateEnum.PAUSED_COULD_NOT_REACH_HWM",
+ "hash": 8741501012399,
+ "health": {
+ "UnityHealth": {
+ "hash": 8741501012363
+ }
+ },
+ "id": "pool_2",
+ "is_all_flash": false,
+ "is_empty": false,
+ "is_fast_cache_enabled": false,
+ "is_harvest_enabled": true,
+ "is_snap_harvest_enabled": false,
+ "metadata_size_subscribed": 312458870784,
+ "metadata_size_used": 244544700416,
+ "name": "fastVP_pool",
+ "object_id": 12884901891,
+ "pool_fast_vp": {
+ "UnityPoolFastVp": {
+ "hash": 8741501228023
+ }
+ },
+ "pool_space_harvest_high_threshold": 95.0,
+ "pool_space_harvest_low_threshold": 85.0,
+ "pool_type": "StoragePoolTypeEnum.TRADITIONAL",
+ "raid_type": "RaidTypeEnum.RAID5",
+ "rebalance_progress": null,
+ "size_free": 2709855928320,
+ "size_subscribed": 2499805044736,
+ "size_total": 3291018690560,
+ "size_used": 455513956352,
+ "snap_size_subscribed": 139720515584,
+ "snap_size_used": 66002944,
+ "snap_space_harvest_high_threshold": 25.0,
+ "snap_space_harvest_low_threshold": 20.0,
+ "tiers": {
+ "UnityPoolTierList": [
+ {
+ "UnityPoolTier": {
+ "hash": 8741500996410
+ }
+ },
+ {
+ "UnityPoolTier": {
+ "hash": 8741501009430
+ }
+ },
+ {
+ "UnityPoolTier": {
+ "hash": 8741501009508
+ }
+ }
+ ]
+ }
+ },
+ ]
+
+Volumes:
+ description: Details of the Volumes.
+ returned: When Volumes exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Volume.
+ type: str
+ name:
+ description: The name of the Volume.
+ type: str
+ sample: [
+ {
+ "current_node": "NodeEnum.SPB",
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "default_node": "NodeEnum.SPB",
+ "description": null,
+ "effective_io_limit_max_iops": null,
+ "effective_io_limit_max_kbps": null,
+ "existed": true,
+ "family_base_lun": {
+ "UnityLun": {
+ "hash": 8774260820794,
+ "id": "sv_27"
+ }
+ },
+ "family_clone_count": 0,
+ "hash": 8774260854260,
+ "health": {
+ "UnityHealth": {
+ "hash": 8774260812499
+ }
+ },
+ "host_access": {
+ "UnityBlockHostAccessList": [
+ {
+ "UnityBlockHostAccess": {
+ "hash": 8774260826387
+ }
+ }
+ ]
+ },
+ "id": "sv_27",
+ "io_limit_policy": null,
+ "is_advanced_dedup_enabled": false,
+ "is_compression_enabled": null,
+ "is_data_reduction_enabled": false,
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "is_thin_clone": false,
+ "is_thin_enabled": false,
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 4026531840,
+ "name": "VSI-UNITY-test-task",
+ "per_tier_size_used": [
+ 111400714240,
+ 0,
+ 0
+ ],
+ "pool": {
+ "UnityPool": {
+ "hash": 8774260811427
+ }
+ },
+ "size_allocated": 107374182400,
+ "size_total": 107374182400,
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97",
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8774267822228
+ }
+ },
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "LUNTypeEnum.VMWARE_ISCSI",
+ "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2"
+ },
+ ]
+
+Snapshot_Schedules:
+ description: Details of the Snapshot Schedules.
+ returned: When Snapshot Schedules exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Snapshot Schedule.
+ type: str
+ name:
+ description: The name of the Snapshot Schedule.
+ type: str
+ sample: [
+ {
+ "existed": true,
+ "hash": 8775599492651,
+ "id": "snapSch_1",
+ "is_default": true,
+ "is_modified": null,
+ "is_sync_replicated": false,
+ "luns": null,
+ "modification_time": "2021-08-18 19:10:33.774000+00:00",
+ "name": "CEM_DEFAULT_SCHEDULE_DEFAULT_PROTECTION",
+ "rules": {
+ "UnitySnapScheduleRuleList": [
+ {
+ "UnitySnapScheduleRule": {
+ "hash": 8775599498593
+ }
+ }
+ ]
+ },
+ "storage_resources": {
+ "UnityStorageResourceList": [
+ {
+ "UnityStorageResource": {
+ "hash": 8775599711597,
+ "id": "res_88"
+ }
+ },
+ {
+ "UnityStorageResource": {
+ "hash": 8775599711528,
+ "id": "res_3099"
+ }
+ }
+ ]
+ },
+ "version": "ScheduleVersionEnum.LEGACY"
+ },
+ ]
+
+NAS_Servers:
+ description: Details of the NAS Servers.
+ returned: When NAS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NAS Server.
+ type: str
+ name:
+ description: The name of the NAS Server.
+ type: str
+ sample: [
+ {
+ "allow_unmapped_user": null,
+ "cifs_server": null,
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8747629920422,
+ "id": "spb"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE",
+ "default_unix_user": null,
+ "default_windows_user": null,
+ "existed": true,
+ "file_dns_server": null,
+ "file_interface": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8747626606870,
+ "id": "if_6"
+ }
+ }
+ ]
+ },
+ "filesystems": {
+ "UnityFileSystemList": [
+ {
+ "UnityFileSystem": {
+ "hash": 8747625901355,
+ "id": "fs_6892"
+ }
+ },
+ ]
+ },
+ "hash": 8747625900370,
+ "health": {
+ "UnityHealth": {
+ "hash": 8747625900493
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8747625877420,
+ "id": "spb"
+ }
+ },
+ "id": "nas_1",
+ "is_backup_only": false,
+ "is_multi_protocol_enabled": false,
+ "is_packet_reflect_enabled": false,
+ "is_replication_destination": false,
+ "is_replication_enabled": false,
+ "is_windows_to_unix_username_mapping_enabled": null,
+ "name": "lglad072",
+ "pool": {
+ "UnityPool": {
+ "hash": 8747629920479,
+ "id": "pool_3"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": 8747626625166,
+ "id": "preferred_if_1"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 2952790016,
+ "tenant": null,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 8747626604144,
+ "id": "cava_1"
+ }
+ }
+ },
+ ]
+
+File_Systems:
+ description: Details of the File Systems.
+ returned: When File Systems exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the File System.
+ type: str
+ name:
+ description: The name of the File System.
+ type: str
+ sample: [
+ {
+ "access_policy": "AccessPolicyEnum.UNIX",
+ "cifs_notify_on_change_dir_depth": 512,
+ "cifs_share": null,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": true,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "format": "FSFormatEnum.UFS64",
+ "hash": 8786518053735,
+ "health": {
+ "UnityHealth": {
+ "hash": 8786518049091
+ }
+ },
+ "host_io_size": "HostIOSizeEnum.GENERAL_8K",
+ "id": "fs_12",
+ "is_advanced_dedup_enabled": false,
+ "is_cifs_notify_on_access_enabled": false,
+ "is_cifs_notify_on_write_enabled": false,
+ "is_cifs_op_locks_enabled": true,
+ "is_cifs_sync_writes_enabled": false,
+ "is_data_reduction_enabled": false,
+ "is_read_only": false,
+ "is_smbca": false,
+ "is_thin_enabled": true,
+ "locking_policy": "FSLockingPolicyEnum.MANDATORY",
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 3758096384,
+ "min_size_allocated": 0,
+ "name": "vro-daniel-test",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8786517296113,
+ "id": "nas_1"
+ }
+ },
+ "nfs_share": null,
+ "per_tier_size_used": [
+ 6442450944,
+ 0,
+ 0
+ ],
+ "pool": {
+ "UnityPool": {
+ "hash": 8786518259493,
+ "id": "pool_3"
+ }
+ },
+ "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES",
+ "size_allocated": 283148288,
+ "size_allocated_total": 4041244672,
+ "size_preallocated": 2401206272,
+ "size_total": 107374182400,
+ "size_used": 1620312064,
+ "snap_count": 0,
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8786518044167,
+ "id": "res_20"
+ }
+ },
+ "supported_protocols": "FSSupportedProtocolEnum.NFS",
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "FilesystemTypeEnum.FILESYSTEM"
+ },
+ ]
+
+Snapshots:
+ description: Details of the Snapshots.
+ returned: When Snapshots exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Snapshot.
+ type: str
+ name:
+ description: The name of the Snapshot.
+ type: str
+ sample: [
+ {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "attached_wwn": null,
+ "creation_time": "2022-04-06 11:19:26.818000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.REP_V2",
+ "creator_user": null,
+ "description": "",
+ "existed": true,
+ "expiration_time": null,
+ "hash": 8739100256648,
+ "host_access": null,
+ "id": "38654716464",
+ "io_limit_policy": null,
+ "is_auto_delete": false,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": true,
+ "last_writable_time": null,
+ "lun": {
+ "UnityLun": {
+ "hash": 8739100148962,
+ "id": "sv_301"
+ }
+ },
+ "name": "42949677504_APM00213404195_0000.ckpt000_9508038064690266.2_238",
+ "parent_snap": null,
+ "size": 3221225472,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY",
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8739100173002,
+ "id": "sv_301"
+ }
+ }
+ },
+ ]
+
+NFS_Exports:
+ description: Details of the NFS Exports.
+ returned: When NFS Exports exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NFS Export.
+ type: str
+ name:
+ description: The name of the NFS Export.
+ type: str
+ sample: [
+ {
+ "anonymous_gid": 4294967294,
+ "anonymous_uid": 4294967294,
+ "creation_time": "2021-12-01 06:21:48.381000+00:00",
+ "default_access": "NFSShareDefaultAccessEnum.NO_ACCESS",
+ "description": "",
+ "existed": true,
+ "export_option": 1,
+ "export_paths": [
+ "10.230.24.20:/zack_nfs_01"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8747298565566,
+ "id": "fs_67"
+ }
+ },
+ "hash": 8747298565548,
+ "host_accesses": null,
+ "id": "NFSShare_29",
+ "is_read_only": null,
+ "min_security": "NFSShareSecurityEnum.SYS",
+ "modification_time": "2022-04-01 11:44:17.553000+00:00",
+ "name": "zack_nfs_01",
+ "nfs_owner_username": null,
+ "no_access_hosts": null,
+ "no_access_hosts_string": "10.226.198.207,10.226.198.25,10.226.198.44,10.226.198.85,Host1,
+Host2,Host4,Host5,Host6,10.10.0.0/255.255.240.0",
+ "path": "/",
+ "read_only_hosts": null,
+ "read_only_hosts_string": "",
+ "read_only_root_access_hosts": null,
+ "read_only_root_hosts_string": "",
+ "read_write_hosts": null,
+ "read_write_hosts_string": "",
+ "read_write_root_hosts_string": "",
+ "role": "NFSShareRoleEnum.PRODUCTION",
+ "root_access_hosts": null,
+ "snap": null,
+ "type": "NFSTypeEnum.NFS_SHARE"
+ },
+ ]
+
+SMB_Shares:
+ description: Details of the SMB Shares.
+ returned: When SMB Shares exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the SMB Share.
+ type: str
+ name:
+ description: The name of the SMB Share.
+ type: str
+ sample: [
+ {
+ "creation_time": "2022-03-17 11:56:54.867000+00:00",
+ "description": "",
+ "existed": true,
+ "export_paths": [
+ "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui",
+ "\\\\10.230.24.26\\multi-prot-hui"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8741295638110,
+ "id": "fs_140"
+ }
+ },
+ "hash": 8741295638227,
+ "id": "SMBShare_20",
+ "is_abe_enabled": false,
+ "is_ace_enabled": false,
+ "is_branch_cache_enabled": false,
+ "is_continuous_availability_enabled": false,
+ "is_dfs_enabled": false,
+ "is_encryption_enabled": false,
+ "is_read_only": null,
+ "modified_time": "2022-03-17 11:56:54.867000+00:00",
+ "name": "multi-prot-hui",
+ "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE",
+ "path": "/",
+ "snap": null,
+ "type": "CIFSTypeEnum.CIFS_SHARE",
+ "umask": "022"
+ },
+ ]
+
+User_Quotas:
+ description: Details of the user quotas.
+ returned: When user quotas exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the user quota.
+ type: str
+ uid:
+ description: The UID of the user quota.
+ type: str
+ sample: [
+ {
+ "id": "userquota_171798694698_0_60000",
+ "uid": 60000
+ },
+ {
+ "id": "userquota_171798694939_0_5001",
+ "uid": 5001
+ }
+ ]
+
+Tree_Quotas:
+ description: Details of the quota trees.
+ returned: When quota trees exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the quota tree.
+ type: str
+ path:
+ description: The path of the quota tree.
+ type: str
+ sample: [
+ {
+ "id": "treequota_171798709589_1",
+ "path": "/vro-ui-fs-rkKfimmN"
+ },
+ {
+ "id": "treequota_171798709590_1",
+ "path": "/vro-ui-fs-mGYXAMqk"
+ }
+ ]
+
+Disk_Groups:
+ description: Details of the disk groups.
+ returned: When disk groups exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the disk group.
+ type: str
+ name:
+ description: The name of the disk group.
+ type: str
+ tier_type:
+ description: The tier type of the disk group.
+ type: str
+ sample: [
+ {
+ "id": "dg_3",
+ "name": "400 GB SAS Flash 2",
+ "tier_type": "EXTREME_PERFORMANCE"
+ },
+ {
+ "id": "dg_16",
+ "name": "600 GB SAS 10K",
+ "tier_type": "PERFORMANCE"
+ }
+ ]
+
+NFS_Servers:
+ description: Details of the NFS Servers.
+ returned: When NFS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the NFS Servers.
+ type: str
+ sample: [
+ {
+ "id": "nfs_3",
+ },
+ {
+ "id": "nfs_4",
+ },
+ {
+ "id": "nfs_9",
+ }
+ ]
+
+CIFS_Servers:
+ description: Details of the CIFS Servers.
+ returned: When CIFS Servers exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the CIFS Servers.
+ type: str
+ name:
+ description: The name of the CIFS server.
+ type: str
+ sample: [
+ {
+ "id": "cifs_3",
+ "name": "test_cifs_1"
+ },
+ {
+ "id": "cifs_4",
+ "name": "test_cifs_2"
+ },
+ {
+ "id": "cifs_9",
+ "name": "test_cifs_3"
+ }
+ ]
+
+Ethernet_ports:
+ description: Details of the ethernet ports.
+ returned: When ethernet ports exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the ethernet port.
+ type: str
+ name:
+ description: The name of the ethernet port.
+ type: str
+ sample: [
+ {
+ "id": "spa_mgmt",
+ "name": "SP A Management Port"
+ },
+ {
+ "id": "spa_ocp_0_eth0",
+ "name": "SP A 4-Port Card Ethernet Port 0"
+ },
+ {
+ "id": "spa_ocp_0_eth1",
+ "name": "SP A 4-Port Card Ethernet Port 1"
+ }
+ ]
+
+File_interfaces:
+    description: Details of the file interfaces.
+    returned: When file interfaces exist.
+    type: list
+    contains:
+        id:
+            description: The ID of the file interface.
+            type: str
+        name:
+            description: The name of the file interface.
+            type: str
+        ip_address:
+            description: IP address of the file interface.
+ type: str
+ sample: [
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "1_APMXXXXXXXXXX"
+ },
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "2_APMXXXXXXXXXX"
+ },
+ {
+ "id": "if_3",
+ "ip_address": "xx.xx.xx.xx",
+ "name": "3_APMXXXXXXXXXX"
+ }
+ ]
+'''
+
+from re import sub
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('info')
+SUCCESSFULL_LISTED_MSG = 'Successfully listed.'
+
+application_type = "Ansible/1.6.0"
+
+
+class Info(object):
+ """Class with Info operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_info_parameters())
+
+ """ initialize the ansible module """
+ self.module = AnsibleModule(argument_spec=self.module_params,
+ supports_check_mode=True)
+ utils.ensure_required_libs(self.module)
+
+ self.unity = utils.get_unity_unisphere_connection(self.module.params,
+ application_type)
+ LOG.info('Got the unity instance for provisioning on Unity')
+
+ def get_array_details(self):
+ """ Get the list of snapshot schedules on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting array details ')
+ array_details = self.unity.info
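+ # _get_properties() converts the storops resource object
+ # into a plain dictionary suitable for module output.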
+ return array_details._get_properties()
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ msg = 'Incorrect username or password provided.'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = 'Get array details from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_hosts_list(self):
+ """ Get the list of hosts on a given Unity storage system """
+
+ try:
+ LOG.info('Getting hosts list ')
+ hosts = self.unity.get_host()
+ return result_list(hosts)
+
+ except Exception as e:
+ msg = 'Get hosts list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_fc_initiators_list(self):
+ """ Get the list of FC Initiators on a given Unity storage system """
+
+ try:
+ LOG.info('Getting FC initiators list ')
+ fc_initiator = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli, type=utils.HostInitiatorTypeEnum.FC)
+ return fc_initiators_result_list(fc_initiator)
+
+ except Exception as e:
+ msg = 'Get FC initiators list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_iscsi_initiators_list(self):
+ """ Get the list of ISCSI initiators on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting ISCSI initiators list ')
+ iscsi_initiator = utils.host.UnityHostInitiatorList \
+ .get(cli=self.unity._cli,
+ type=utils.HostInitiatorTypeEnum.ISCSI)
+ return iscsi_initiators_result_list(iscsi_initiator)
+
+ except Exception as e:
+ msg = 'Get ISCSI initiators list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_consistency_groups_list(self):
+ """ Get the list of consistency groups on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting consistency groups list ')
+ consistency_groups = utils.cg.UnityConsistencyGroupList \
+ .get(self.unity._cli)
+ return result_list(consistency_groups)
+
+ except Exception as e:
+ msg = 'Get consistency groups list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_storage_pools_list(self):
+ """ Get the list of storage pools on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting storage pools list ')
+ storage_pools = self.unity.get_pool()
+ return result_list(storage_pools)
+
+ except Exception as e:
+ msg = 'Get storage pools list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_volumes_list(self):
+ """ Get the list of volumes on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting volumes list ')
+ volumes = self.unity.get_lun()
+ return result_list(volumes)
+
+ except Exception as e:
+ msg = 'Get volumes list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshot_schedules_list(self):
+ """ Get the list of snapshot schedules on a given Unity storage
+ system """
+
+ try:
+ LOG.info('Getting snapshot schedules list ')
+ snapshot_schedules = utils.snap_schedule.UnitySnapScheduleList \
+ .get(cli=self.unity._cli)
+ return result_list(snapshot_schedules)
+
+ except Exception as e:
+ msg = 'Get snapshot schedules list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nas_servers_list(self):
+ """Get the list of NAS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NAS servers list")
+ nas_servers = self.unity.get_nas_server()
+ return result_list(nas_servers)
+
+ except Exception as e:
+ msg = 'Get NAS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_file_systems_list(self):
+ """Get the list of file systems on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting file systems list")
+ file_systems = self.unity.get_filesystem()
+ return result_list(file_systems)
+
+ except Exception as e:
+ msg = 'Get file systems list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshots_list(self):
+ """Get the list of snapshots on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting snapshots list")
+ snapshots = self.unity.get_snap()
+ return result_list(snapshots)
+
+ except Exception as e:
+ msg = 'Get snapshots from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nfs_exports_list(self):
+ """Get the list of NFS exports on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NFS exports list")
+ nfs_exports = self.unity.get_nfs_share()
+ return result_list(nfs_exports)
+
+ except Exception as e:
+ msg = 'Get NFS exports from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_smb_shares_list(self):
+ """Get the list of SMB shares on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting SMB shares list")
+ smb_shares = self.unity.get_cifs_share()
+ return result_list(smb_shares)
+
+ except Exception as e:
+ msg = 'Get SMB shares from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_user_quota_list(self):
+ """Get the list of user quotas on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting user quota list")
+ user_quotas = self.unity.get_user_quota()
+ return user_quota_result_list(user_quotas)
+
+ except Exception as e:
+ msg = 'Get user quotas from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_tree_quota_list(self):
+ """Get the list of quota trees on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting quota tree list")
+ tree_quotas = self.unity.get_tree_quota()
+ return tree_quota_result_list(tree_quotas)
+
+ except Exception as e:
+ msg = 'Get quota trees from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_disk_groups_list(self):
+ """Get the list of disk group details on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting disk group list")
+ pool_disk_list = []
+ disk_instances = utils.UnityDiskGroupList(cli=self.unity._cli)
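+ # Flatten each disk group into id/name/tier_type;
+ # tier_type is an enum, so its .name gives the string label.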
+ if disk_instances:
+ for disk in disk_instances:
+ pool_disk = {"id": disk.id, "name": disk.name,
+ "tier_type": disk.tier_type.name}
+ pool_disk_list.append(pool_disk)
+ return pool_disk_list
+ except Exception as e:
+ msg = 'Get disk group from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nfs_server_list(self):
+ """Get the list of NFS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting NFS servers list")
+ nfs_servers = self.unity.get_nfs_server()
+ return nfs_server_result_list(nfs_servers)
+
+ except Exception as e:
+ msg = 'Get NFS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_cifs_server_list(self):
+ """Get the list of CIFS servers on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting CIFS servers list")
+ cifs_servers = self.unity.get_cifs_server()
+ return result_list(cifs_servers)
+
+ except Exception as e:
+ msg = 'Get CIFS servers list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_ethernet_port_list(self):
+ """Get the list of ethernet ports on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting ethernet ports list")
+ ethernet_port = self.unity.get_ethernet_port()
+ return result_list(ethernet_port)
+
+ except Exception as e:
+ msg = 'Get ethernet port list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_file_interface_list(self):
+ """Get the list of file interfaces on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting file interfaces list")
+ file_interface = self.unity.get_file_interface()
+ return file_interface_result_list(file_interface)
+
+ except Exception as e:
+ msg = 'Get file interface list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def perform_module_operation(self):
+ """ Perform different actions on Info based on user parameter
+ chosen in playbook """
+
+ """ Get the array details a given Unity storage system """
+
+ array_details = self.get_array_details()
+ host = []
+ fc_initiator = []
+ iscsi_initiator = []
+ cg = []
+ storage_pool = []
+ vol = []
+ snapshot_schedule = []
+ nas_server = []
+ file_system = []
+ snapshot = []
+ nfs_export = []
+ smb_share = []
+ user_quota = []
+ tree_quota = []
+ disk_group = []
+ nfs_server = []
+ cifs_server = []
+ ethernet_port = []
+ file_interface = []
+
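+ # Fetch only the subsets requested via gather_subset;
+ # any subset not requested is returned as an empty list.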
+ subset = self.module.params['gather_subset']
+ if subset is not None:
+ if 'host' in subset:
+ host = self.get_hosts_list()
+ if 'fc_initiator' in subset:
+ fc_initiator = self.get_fc_initiators_list()
+ if 'iscsi_initiator' in subset:
+ iscsi_initiator = self.get_iscsi_initiators_list()
+ if 'cg' in subset:
+ cg = self.get_consistency_groups_list()
+ if 'storage_pool' in subset:
+ storage_pool = self.get_storage_pools_list()
+ if 'vol' in subset:
+ vol = self.get_volumes_list()
+ if 'snapshot_schedule' in subset:
+ snapshot_schedule = self.get_snapshot_schedules_list()
+ if 'nas_server' in subset:
+ nas_server = self.get_nas_servers_list()
+ if 'file_system' in subset:
+ file_system = self.get_file_systems_list()
+ if 'snapshot' in subset:
+ snapshot = self.get_snapshots_list()
+ if 'nfs_export' in subset:
+ nfs_export = self.get_nfs_exports_list()
+ if 'smb_share' in subset:
+ smb_share = self.get_smb_shares_list()
+ if 'user_quota' in subset:
+ user_quota = self.get_user_quota_list()
+ if 'tree_quota' in subset:
+ tree_quota = self.get_tree_quota_list()
+ if 'disk_group' in subset:
+ disk_group = self.get_disk_groups_list()
+ if 'nfs_server' in subset:
+ nfs_server = self.get_nfs_server_list()
+ if 'cifs_server' in subset:
+ cifs_server = self.get_cifs_server_list()
+ if 'ethernet_port' in subset:
+ ethernet_port = self.get_ethernet_port_list()
+ if 'file_interface' in subset:
+ file_interface = self.get_file_interface_list()
+
+ self.module.exit_json(
+ Array_Details=array_details,
+ Hosts=host,
+ FC_initiators=fc_initiator,
+ ISCSI_initiators=iscsi_initiator,
+ Consistency_Groups=cg,
+ Storage_Pools=storage_pool,
+ Volumes=vol,
+ Snapshot_Schedules=snapshot_schedule,
+ NAS_Servers=nas_server,
+ File_Systems=file_system,
+ Snapshots=snapshot,
+ NFS_Exports=nfs_export,
+ SMB_Shares=smb_share,
+ User_Quotas=user_quota,
+ Tree_Quotas=tree_quota,
+ Disk_Groups=disk_group,
+ NFS_Servers=nfs_server,
+ CIFS_Servers=cifs_server,
+ Ethernet_ports=ethernet_port,
+ File_interfaces=file_interface
+ )
+
+
+def result_list(entity):
+ """ Get the name and id associated with the Unity entities """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def fc_initiators_result_list(entity):
+ """ Get the WWN and id associated with the Unity FC initiators """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "WWN": item.initiator_id,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def iscsi_initiators_result_list(entity):
+ """ Get the IQN and id associated with the Unity ISCSI initiators """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "IQN": item.initiator_id,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def user_quota_result_list(entity):
+ """ Get the id and uid associated with the Unity user quotas """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "uid": item.uid,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def tree_quota_result_list(entity):
+ """ Get the id and path associated with the Unity quota trees """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ {
+ "path": item.path,
+ "id": item.id
+ }
+ )
+ return result
+ else:
+ return None
+
+
+def nfs_server_result_list(entity):
+ """ Get the id of NFS Server """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def file_interface_result_list(entity):
+ """ Get the id, name and IP of File Interfaces """
+ result = []
+
+ if entity:
+ LOG.info(SUCCESSFULLY_LISTED_MSG)
+ for item in entity:
+ result.append(
+ item._get_properties()
+ )
+ return result
+ else:
+ return None
+
+
+def get_info_parameters():
+ """This method provides parameters required for the ansible
+ info module on Unity"""
+ return dict(gather_subset=dict(type='list', required=False,
+ elements='str',
+ choices=['host', 'fc_initiator',
+ 'iscsi_initiator', 'cg',
+ 'storage_pool', 'vol',
+ 'snapshot_schedule', 'nas_server',
+ 'file_system', 'snapshot',
+ 'nfs_export', 'smb_share',
+ 'user_quota', 'tree_quota',
+ 'disk_group', 'nfs_server',
+ 'cifs_server', 'ethernet_port',
+ 'file_interface']))
+
+
+def main():
+ """ Create Unity Info object and perform action on it
+ based on user input from playbook"""
+ obj = Info()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/interface.py b/ansible_collections/dellemc/unity/plugins/modules/interface.py
new file mode 100644
index 000000000..95ddfd26a
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/interface.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing Interfaces on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: interface
+version_added: '1.4.0'
+short_description: Manage Interfaces on Unity storage system
+description:
+- Managing the interfaces on the Unity storage system includes adding interfaces to a NAS server, getting
+ details of an interface, and deleting configured interfaces.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server for which the interface will be configured.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server for which the interface will be configured.
+ type: str
+ ethernet_port_name:
+ description:
+ - Name of the ethernet port.
+ type: str
+ ethernet_port_id:
+ description:
+ - ID of the ethernet port.
+ type: str
+ role:
+ description:
+ - Indicates whether interface is configured as production or backup.
+ choices: [PRODUCTION, BACKUP]
+ type: str
+ interface_ip:
+ description:
+ - IP of network interface.
+ required: true
+ type: str
+ netmask:
+ description:
+ - Netmask of network interface.
+ type: str
+ prefix_length:
+ description:
+ - Prefix length of network interface.
+ - Prefix length is mutually exclusive with I(netmask).
+ type: int
+ gateway:
+ description:
+ - Gateway of network interface.
+ type: str
+ vlan_id:
+ description:
+ - VLAN ID of the interface.
+ type: int
+ state:
+ description:
+ - Define whether the interface should exist or not.
+ choices: [present, absent]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+- Modify operation for interface is not supported.
+'''
+
+EXAMPLES = r'''
+
+ - name: Add Interface as Backup to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "BACKUP"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Add Interface as Production to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "PRODUCTION"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Get interface details
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "present"
+
+ - name: Delete Interface
+ dellemc.unity.interface:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+interface_details:
+ description: Details of the interface.
+ returned: When interface is configured for NAS Server.
+ type: dict
+ contains:
+ existed:
+ description: Indicates if interface exists.
+ type: bool
+ gateway:
+ description: Gateway of network interface.
+ type: str
+ id:
+ description: Unique identifier of the interface.
+ type: str
+ ip_address:
+ description: IP address of interface.
+ type: str
+ ip_port:
+ description: Port on which network interface is configured.
+ type: dict
+ contains:
+ id:
+ description: ID of ip_port.
+ type: str
+ ip_protocol_version:
+ description: IP protocol version.
+ type: str
+ is_disabled:
+ description: Indicates whether interface is disabled.
+ type: bool
+ is_preferred:
+ description: Indicates whether interface is preferred.
+ type: bool
+ mac_address:
+ description: MAC address of the ip_port.
+ type: str
+ name:
+ description: System configured name of the interface.
+ type: str
+ nas_server:
+ description: Details of NAS server where interface is configured.
+ type: dict
+ contains:
+ id:
+ description: ID of NAS Server.
+ type: str
+ sample: {
+ "existed": true,
+ "gateway": "xx.xx.xx.xx",
+ "hash": 8785300560421,
+ "health": {
+ "UnityHealth": {
+ "hash": 8785300565468
+ }
+ },
+ "id": "if_69",
+ "ip_address": "10.10.10.10",
+ "ip_port": {
+ "UnityIpPort": {
+ "hash": 8785300565300,
+ "id": "spb_ocp_0_eth0"
+ }
+ },
+ "ip_protocol_version": "IpProtocolVersionEnum.IPv4",
+ "is_disabled": false,
+ "is_preferred": true,
+ "mac_address": "0C:48:C6:9F:57:BF",
+ "name": "36_APM00213404194",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8785300565417,
+ "id": "nas_10"
+ }
+ },
+ "netmask": "10.10.10.10",
+ "replication_policy": null,
+ "role": "FileInterfaceRoleEnum.PRODUCTION",
+ "source_parameters": null,
+ "v6_prefix_length": null,
+ "vlan_id": 324
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import ipaddress
+from ipaddress import ip_network
+
+LOG = utils.get_logger('interface')
+
+application_type = "Ansible/1.6.0"
+
+
+class Interface(object):
+ """Class with Interface operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_interface_parameters())
+
+ mutually_exclusive = [['nas_server_name', 'nas_server_id'], ['ethernet_port_id', 'ethernet_port_name'], ['netmask', 'prefix_length']]
+ required_one_of = [['nas_server_name', 'nas_server_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ LOG.info('Check Mode Flag %s', self.module.check_mode)
+
+ def get_interface_details(self, nas_server_obj):
+ """Get interface details.
+ :param: nas_server_obj: NAS server object.
+ :return: Returns interface details configured on NAS server.
+ """
+
+ try:
+ nas_server_obj_properties = nas_server_obj._get_properties()
+ if nas_server_obj_properties['file_interface']:
+ for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']:
+ interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id'])
+ if interface_id.ip_address == self.module.params['interface_ip']:
+ return interface_id
+ return None
+ except Exception as e:
+ error_msg = "Getting Interface details failed" \
+ " with error %s" % (str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server_obj(self, nas_server_name, nas_server_id):
+ """Get NAS server ID.
+ :param: nas_server_name: The name of NAS server
+ :param: nas_server_id: ID of NAS server
+ :return: Return NAS server object if exists
+ """
+
+ LOG.info("Getting NAS server object")
+ try:
+ if nas_server_name:
+ obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
+ return obj_nas
+ elif nas_server_id:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id)
+ if obj_nas._get_properties()['existed']:
+ return obj_nas
+ else:
+ msg = "NAS server with id %s does not exist" % (nas_server_id)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to get details of NAS server with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def add_interface(self, nas_server_obj, ethernet_port_id=None, ethernet_port_name=None, role=None, interface_ip=None,
+ netmask=None, prefix_length=None, gateway=None, vlan_id=None):
+ """Adding interface to NAS server.
+ :param: nas_server_obj: The NAS server object.
+ :param: ethernet_port_id: ID of ethernet port.
+ :param: ethernet_port_name: Name of ethernet port.
+ :param: role: Role of the interface.
+ :param: interface_ip: IP of interface.
+ :param: netmask: Netmask for interface.
+ :param: prefix_length: Prefix length.
+ :param: gateway: Gateway for interface.
+ :param: vlan_id: vlan_id for interface.
+ :return: Return True if interface is configured successfully.
+ """
+
+ LOG.info("Adding interface to NAS Server")
+ try:
+ nas_server_obj_properties = nas_server_obj._get_properties()
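+ # Idempotency check: if an interface with the requested IP
+ # already exists on this NAS server, report no change.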
+ if nas_server_obj_properties['file_interface']:
+ for item in nas_server_obj_properties['file_interface']['UnityFileInterfaceList']:
+ interface_id = self.unity_conn.get_file_interface(_id=item['UnityFileInterface']['id'])
+ if interface_id._get_properties()['ip_address'] == self.module.params['interface_ip']:
+ return False
+ if role:
+ role_value = get_role_enum(role)
+ if ethernet_port_name:
+ ethernet_port_info = self.unity_conn.get_ethernet_port(name=ethernet_port_name)
+ ethernet_port_id = ethernet_port_info.id
+ if not self.module.check_mode:
+ utils.UnityFileInterface.create(cli=self.unity_conn._cli, nas_server=nas_server_obj.get_id(), ip_port=ethernet_port_id,
+ role=role_value, ip=interface_ip, netmask=netmask, v6_prefix_length=prefix_length,
+ gateway=gateway, vlan_id=vlan_id)
+ return True
+ except Exception as e:
+ msg = "Failed to add interface to NAS Server with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def is_modification_required(self, interface_details):
+ """Check if modification is required in existing interface/s configured for NAS Server
+ :param: interface_details: Existing interface details
+ :return: True if modification is required
+ """
+ key_list = ['vlan_id', 'gateway', 'netmask']
+ for item in key_list:
+ if self.module.params[item] and self.module.params[item] != interface_details[item]:
+ return True
+ return False
+
+ def delete_interface(self, interface_obj):
+ """Delete NFS server.
+ :param: interface_obj: Interface object.
+ :return: Return True if interface is deleted.
+ """
+
+ LOG.info("Deleting interface")
+ try:
+ if not self.module.check_mode:
+ interface_obj.delete()
+ return True
+ except Exception as e:
+ msg = "Failed to delete interface with error: %s" % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_input_params(self):
+ """Validates input parameters"""
+ param_list = ["nas_server_id", "nas_server_name",
+ "ethernet_port_name", "ethernet_port_id", "role",
+ "interface_ip", "netmask", "gateway"]
+
+ for param in param_list:
+ if self.module.params[param] is not None and \
+ len(self.module.params[param].strip()) == 0:
+ msg = "Please provide a valid value for: %s" % param
+ self.module.fail_json(msg=msg)
+
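+ # The VLAN ID must lie strictly between 3 and 4094
+ # (i.e. 4-4093), as enforced by the check below.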
+ if self.module.params['vlan_id'] is not None and \
+ (self.module.params['vlan_id'] <= 3 or
+ self.module.params['vlan_id'] >= 4094):
+ self.module.fail_json(msg='vlan_id should be in the '
+ 'range of 4 to 4093')
+
+ if self.module.params['interface_ip'] and \
+ not is_valid_ip(self.module.params['interface_ip']):
+ self.module.fail_json(msg='The value for interface ip is invalid')
+
+ if self.module.params['gateway'] and \
+ not is_valid_ip(self.module.params['gateway']):
+ self.module.fail_json(msg='The value for gateway is invalid')
+
+ if self.module.params['netmask'] and not \
+ utils.is_valid_netmask(self.module.params['netmask']):
+ self.module.fail_json(msg='Invalid IPv4 address specified '
+ 'for netmask')
+
+ if self.module.params['interface_ip'] and \
+ (get_ip_version(self.module.params['interface_ip']) == 6):
+ self.module.fail_json(msg='IPv6 format is not supported')
+
+ def validate_create_params(self):
+ """Validates input parameters for adding interface"""
+ if self.module.params['role'] is None:
+ self.module.fail_json(msg='Role is a mandatory parameter'
+ ' for adding interface to NAS Server.')
+ if self.module.params['ethernet_port_name'] is None and \
+ self.module.params['ethernet_port_id'] is None:
+ self.module.fail_json(msg='ethernet_port_name/ethernet_port_id '
+ 'is mandatory parameter for adding '
+ 'interface to NAS Server.')
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on Interface module based on parameters
+ passed in the playbook
+ """
+ nas_server_id = self.module.params['nas_server_id']
+ nas_server_name = self.module.params['nas_server_name']
+ ethernet_port_name = self.module.params['ethernet_port_name']
+ ethernet_port_id = self.module.params['ethernet_port_id']
+ role = self.module.params['role']
+ interface_ip = self.module.params['interface_ip']
+ netmask = self.module.params['netmask']
+ prefix_length = self.module.params['prefix_length']
+ gateway = self.module.params['gateway']
+ vlan_id = self.module.params['vlan_id']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and Interface details
+ result = dict(
+ changed=False,
+ interface_details={}
+ )
+ modify_flag = False
+
+ self.validate_input_params()
+
+ interface_details = None
+
+ nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id)
+
+ interface_obj = self.get_interface_details(nas_server_obj)
+
+ if interface_obj and state == 'present':
+ interface_details = interface_obj._get_properties()
+ modify_flag = self.is_modification_required(interface_details)
+ if modify_flag:
+ self.module.fail_json(msg="Modification of Interfaces for NAS server is not supported through Ansible module")
+
+ if not interface_obj and state == 'present':
+ self.validate_create_params()
+
+ result['changed'] = self.add_interface(nas_server_obj, ethernet_port_id, ethernet_port_name, role,
+ interface_ip, netmask, prefix_length, gateway, vlan_id)
+
+ if interface_obj and state == 'absent':
+ result['changed'] = self.delete_interface(interface_obj)
+
+ if result['changed']:
+ nas_server_obj = self.get_nas_server_obj(nas_server_name, nas_server_id)
+ interface_obj = self.get_interface_details(nas_server_obj)
+ if interface_obj:
+ interface_details = interface_obj._get_properties()
+
+ result['interface_details'] = interface_details
+
+ self.module.exit_json(**result)
+
+
+def get_interface_parameters():
+ """This method provide parameters required for the ansible
+ Interface module on Unity"""
+ return dict(
+ nas_server_id=dict(type='str'),
+ nas_server_name=dict(type='str'),
+ ethernet_port_name=dict(type='str'),
+ ethernet_port_id=dict(type='str'),
+ role=dict(type='str', choices=['PRODUCTION', 'BACKUP']),
+ interface_ip=dict(required=True, type='str'),
+ netmask=dict(type='str'),
+ prefix_length=dict(type='int'),
+ gateway=dict(type='str'),
+ vlan_id=dict(type='int'),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def get_role_enum(role):
+ """Getting correct enum values for role
+ :param: role: Indicates role of interface.
+ :return: enum value for role.
+ """
+ return utils.FileInterfaceRoleEnum[role]
+
+
+def is_valid_ip(address):
+ """Validating IP address format
+ :param: address: IP address to be validated for format.
+ """
+ try:
+ ipaddress.ip_address(address)
+ return True
+ except ValueError:
+ return False
+
+
+def get_ip_version(val):
+ """Returns IP address version
+ :param: val: IP address to be validated for version.
+ """
+ try:
+ val = u'{0}'.format(val)
+ ip = ip_network(val, strict=False)
+ return ip.version
+ except ValueError:
+ return 0
+
+
+def main():
+ """Create Unity Interface object and perform action on it
+ based on user input from playbook"""
+ obj = Interface()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nasserver.py b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
new file mode 100644
index 000000000..713125cc2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
@@ -0,0 +1,1142 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nasserver
+version_added: '1.1.0'
+short_description: Manage NAS servers on Unity storage system
+extends_documentation_fragment:
+- dellemc.unity.unity
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+description:
+- Managing NAS servers on Unity storage system includes getting
+ details of NAS servers and modifying attributes of NAS servers.
+options:
+ nas_server_id:
+ description:
+ - The ID of the NAS server.
+ - Either I(nas_server_name) or I(nas_server_id) is required to perform the task.
+ - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive.
+ type: str
+ nas_server_name:
+ description:
+ - The Name of the NAS server.
+ - Either I(nas_server_name) or I(nas_server_id) is required to perform the task.
+ - The parameters I(nas_server_name) and I(nas_server_id) are mutually exclusive.
+ type: str
+ nas_server_new_name:
+ description:
+ - The new name of the NAS server.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ is_replication_destination:
+ description:
+ - It specifies whether the NAS server is a replication destination.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_backup_only:
+ description:
+ - It specifies whether the NAS server is used as backup only.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_multiprotocol_enabled:
+ description:
+ - This parameter indicates whether multiprotocol sharing mode is enabled.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ allow_unmapped_user:
+ description:
+ - This flag is used to mandatorily disable access in case of any user
+ mapping failure.
+ - If C(true), then enable access in case of any user mapping failure.
+ - If C(false), then disable access in case of any user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ default_windows_user:
+ description:
+ - Default windows user name used for granting access in the case of Unix
+ to Windows user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ default_unix_user:
+ description:
+ - Default Unix user name used for granting access in the case of Windows
+ to Unix user mapping failure.
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ enable_windows_to_unix_username_mapping:
+ description:
+ - This parameter indicates whether a Unix to/from Windows user name
+ mapping is enabled.
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ is_packet_reflect_enabled:
+ description:
+ - If the packet has to be reflected, then this parameter
+ has to be set to C(true).
+ - It can be mentioned during modification of the NAS server.
+ type: bool
+ current_unix_directory_service:
+ description:
+ - This is the directory service used for querying identity information
+ for UNIX (such as UIDs, GIDs, net groups).
+ - It can be mentioned during modification of the NAS server.
+ type: str
+ choices: ["NONE", "NIS", "LOCAL", "LDAP", "LOCAL_THEN_NIS", "LOCAL_THEN_LDAP"]
+ replication_params:
+ description:
+ - Settings required for enabling replication.
+ type: dict
+ suboptions:
+ destination_nas_server_name:
+ description:
+ - Name of the destination nas server.
+ - Default value will be source nas server name prefixed by 'DR_'.
+ type: str
+ replication_mode:
+ description:
+ - The replication mode.
+ - This is mandatory to enable replication.
+ type: str
+ choices: ['asynchronous', 'manual']
+ rpo:
+ description:
+ - Maximum time to wait before the system syncs the source and destination resources.
+ - The I(rpo) option should be specified if the I(replication_mode) is C(asynchronous).
+ - The value should be in range of C(5) to C(1440).
+ type: int
+ replication_type:
+ description:
+ - Type of replication.
+ choices: ['local', 'remote']
+ type: str
+ remote_system:
+ description:
+ - Details of remote system to which the replication is being configured.
+ - The I(remote_system) option should be specified if the
+ I(replication_type) is C(remote).
+ type: dict
+ suboptions:
+ remote_system_host:
+ required: true
+ description:
+ - IP or FQDN for remote Unity unisphere Host.
+ type: str
+ remote_system_username:
+ type: str
+ required: true
+ description:
+ - User name of remote Unity unisphere Host.
+ remote_system_password:
+ type: str
+ required: true
+ description:
+ - Password of remote Unity unisphere Host.
+ remote_system_verifycert:
+ type: bool
+ default: true
+ description:
+ - Boolean variable to specify whether or not to validate SSL
+ certificate of remote Unity unisphere Host.
+ - C(true) - Indicates that the SSL certificate should be verified.
+ - C(false) - Indicates that the SSL certificate should not be
+ verified.
+ remote_system_port:
+ description:
+ - Port at which remote Unity unisphere is hosted.
+ type: int
+ default: 443
+ destination_pool_name:
+ description:
+ - Name of pool to allocate destination NAS server.
+ - Mutually exclusive with I(destination_pool_id).
+ type: str
+ destination_pool_id:
+ description:
+ - ID of pool to allocate destination NAS server.
+ - Mutually exclusive with I(destination_pool_name).
+ type: str
+ destination_sp:
+ description:
+ - Storage processor of the destination NAS server.
+ choices: ['SPA', 'SPB']
+ type: str
+ is_backup:
+ description:
+ - Indicates whether the destination NAS server is a backup.
+ type: bool
+ replication_name:
+ description:
+ - User defined name for replication session.
+ type: str
+ new_replication_name:
+ description:
+ - Replication name to rename the session to.
+ type: str
+ replication_state:
+ description:
+ - State of the replication.
+ choices: ['enable', 'disable']
+ type: str
+ replication_reuse_resource:
+ description:
+ - This parameter indicates if existing NAS Server is to be used for replication.
+ type: bool
+ state:
+ description:
+ - Define the state of NAS server on the array.
+ - The value present indicates that NAS server should exist on the system after
+ the task is executed.
+ - In this release deletion of NAS server is not supported. Hence, if state is
+ set to C(absent) for any existing NAS server then an error will be thrown.
+ - For any non-existing NAS server, if state is set to C(absent) then it will return None.
+ type: str
+ required: true
+ choices: ['present', 'absent']
+
+notes:
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+
+ - name: Get Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ state: "present"
+
+ - name: Modify Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "{{nas_server_name}}"
+ nas_server_new_name: "updated_sample_nas_server"
+ is_replication_destination: False
+ is_backup_only: False
+ is_multiprotocol_enabled: True
+ allow_unmapped_user: True
+ default_unix_user: "default_unix_sample_user"
+ default_windows_user: "default_windows_sample_user"
+ enable_windows_to_unix_username_mapping: True
+ current_unix_directory_service: "LDAP"
+ is_packet_reflect_enabled: True
+ state: "present"
+
+ - name: Enable replication for NAS Server on Local System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_id: "nas_10"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "local"
+ destination_pool_name: "Pool_Ansible_Neo_DND"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: False
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ destination_sp: "SPA"
+ is_backup: True
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System in existing NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: True
+ replication_params:
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ replication_name: "test_replication"
+ remote_system:
+ remote_system_host: '10.10.10.10'
+ remote_system_verifycert: False
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_repl"
+ new_replication_name: "test_repl_updated"
+ replication_mode: "asynchronous"
+ rpo: 50
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication by specifying replication_name on the nasserver
+ dellemc.unity.nasserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+nas_server_details:
+ description: The NAS server details.
+ type: dict
+ returned: When NAS server exists.
+ contains:
+ name:
+ description: Name of the NAS server.
+ type: str
+ id:
+ description: ID of the NAS server.
+ type: str
+ allow_unmapped_user:
+ description: Enable/disable access status in case of any user
+ mapping failure.
+ type: bool
+ current_unix_directory_service:
+ description: Directory service used for querying identity
+ information for UNIX (such as UIDs, GIDs, net groups).
+ type: str
+ default_unix_user:
+ description: Default Unix user name used for granting access
+ in the case of Windows to Unix user mapping failure.
+ type: str
+ default_windows_user:
+ description: Default windows user name used for granting
+ access in the case of Unix to Windows user mapping
+ failure.
+ type: str
+ is_backup_only:
+ description: Whether the NAS server is used as backup only.
+ type: bool
+ is_multi_protocol_enabled:
+ description: Indicates whether multiprotocol sharing mode is
+ enabled.
+ type: bool
+ is_packet_reflect_enabled:
+ description: Indicates whether packet reflect is enabled.
+ type: bool
+ is_replication_destination:
+ description: Indicates whether the NAS server is a replication
+ destination.
+ type: bool
+ is_windows_to_unix_username_mapping_enabled:
+ description: Indicates whether a Unix to/from Windows user name
+ mapping is enabled.
+ type: bool
+ sample: {
+ "allow_unmapped_user": null,
+ "cifs_server": {
+ "UnityCifsServerList": [
+ {
+ "UnityCifsServer": {
+ "hash": 8761756885270,
+ "id": "cifs_34"
+ }
+ }
+ ]
+ },
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8761756885273,
+ "id": "spb"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NIS",
+ "default_unix_user": null,
+ "default_windows_user": null,
+ "existed": true,
+ "file_dns_server": {
+ "UnityFileDnsServer": {
+ "hash": 8761756885441,
+ "id": "dns_12"
+ }
+ },
+ "file_interface": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8761756889908,
+ "id": "if_37"
+ }
+ }
+ ]
+ },
+ "filesystems": null,
+ "hash": 8761757005084,
+ "health": {
+ "UnityHealth": {
+ "hash": 8761756867588
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8761756867618,
+ "id": "spb"
+ }
+ },
+ "id": "nas_10",
+ "is_backup_only": false,
+ "is_multi_protocol_enabled": false,
+ "is_packet_reflect_enabled": false,
+ "is_replication_destination": false,
+ "is_replication_enabled": true,
+ "is_windows_to_unix_username_mapping_enabled": null,
+ "name": "dummy_nas",
+ "pool": {
+ "UnityPool": {
+ "hash": 8761756885360,
+ "id": "pool_7"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": 8761756885438,
+ "id": "preferred_if_10"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.REMOTE",
+ "size_allocated": 3489660928,
+ "tenant": null,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 8761756885426,
+ "id": "cava_10"
+ }
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+LOG = utils.get_logger('nasserver')
+
+application_type = "Ansible/1.6.0"
+
+
+class NASServer(object):
+ """Class with NAS Server operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_nasserver_parameters())
+
+ # initialize the ansible module
+ mut_ex_args = [['nas_server_name', 'nas_server_id']]
+ required_one_of = [['nas_server_name', 'nas_server_id']]
+
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mut_ex_args,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ # result is a dictionary that contains changed status and
+ # nas server details
+ self.result = {"changed": False,
+ 'nas_server_details': {}}
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ self.nas_server_conn_obj = utils.nas_server.UnityNasServer(
+ self.unity_conn)
+ LOG.info('Connection established with the Unity Array')
+
+ def get_current_uds_enum(self, current_uds):
+ """
+ Get the enum of the Offline Availability parameter.
+ :param current_uds: Current Unix Directory Service string
+ :return: current_uds enum
+ """
+ if current_uds in \
+ utils.NasServerUnixDirectoryServiceEnum.__members__:
+ return utils.NasServerUnixDirectoryServiceEnum[current_uds]
+ else:
+ error_msg = "Invalid value {0} for Current Unix Directory" \
+ " Service provided".format(current_uds)
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server(self, nas_server_name, nas_server_id):
+ """
+ Get the NAS Server Object using NAME/ID of the NAS Server.
+ :param nas_server_name: Name of the NAS Server
+ :param nas_server_id: ID of the NAS Server
+ :return: NAS Server object.
+ """
+ nas_server = nas_server_name if nas_server_name else nas_server_id
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id,
+ name=nas_server_name)
+ if nas_server_id and obj_nas and not obj_nas.existed:
+ # if obj_nas is not None and existed is observed as False,
+ # then None will be returned.
+ LOG.error("NAS Server object does not exist"
+ " with ID: %s ", nas_server_id)
+ return None
+ return obj_nas
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ return None
+
+ except Exception as e:
+ nas_server = nas_server_name if nas_server_name \
+ else nas_server_id
+ err_msg = "Failed to get nas server details {0} with" \
+ " error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def to_update(self, nas_server_obj, current_uds):
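+ """Check whether any NAS server attribute differs from the
+ corresponding module parameter.
+ :param nas_server_obj: NAS Server object
+ :param current_uds: Current Unix Directory Service enum
+ :return: True if at least one attribute requires modification
+ """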
+ LOG.info("Checking Whether the parameters are modified or not.")
+
+ # Checking all parameters individually because the nas obj return
+ # names are different compared to ansible parameter names.
+
+ # Current Unix Directory Service
+ if current_uds is not None and \
+ current_uds != nas_server_obj.current_unix_directory_service:
+ return True
+
+ # Rename NAS Server
+ if self.module.params['nas_server_new_name'] is not None and \
+ self.module.params['nas_server_new_name'] != \
+ nas_server_obj.name:
+ return True
+
+ # Is Replication Destination
+ if self.module.params["is_replication_destination"] is not None and \
+ (nas_server_obj.is_replication_destination is None or
+ self.module.params["is_replication_destination"] !=
+ nas_server_obj.is_replication_destination):
+ return True
+
+ # Is Multiprotocol Enabled
+ if self.module.params["is_multiprotocol_enabled"] is not None and \
+ (nas_server_obj.is_multi_protocol_enabled is None or
+ self.module.params["is_multiprotocol_enabled"] !=
+ nas_server_obj.is_multi_protocol_enabled):
+ return True
+
+ # Is Back Up Enabled
+ if self.module.params["is_backup_only"] is not None and \
+ (nas_server_obj.is_backup_only is None or
+ self.module.params["is_backup_only"] !=
+ nas_server_obj.is_backup_only):
+ return True
+
+ # Is Packet Reflect Enabled
+ if self.module.params["is_packet_reflect_enabled"] is not None and \
+ (nas_server_obj.is_packet_reflect_enabled is None or
+ self.module.params["is_packet_reflect_enabled"] !=
+ nas_server_obj.is_packet_reflect_enabled):
+ return True
+
+ # Allow Unmapped User
+ if self.module.params["allow_unmapped_user"] is not None and \
+ (nas_server_obj.allow_unmapped_user is None or
+ self.module.params["allow_unmapped_user"] !=
+ nas_server_obj.allow_unmapped_user):
+ return True
+
+ # Enable Windows To Unix User Mapping Flag
+ nas_win_flag = \
+ nas_server_obj.is_windows_to_unix_username_mapping_enabled
+ input_win_flag = \
+ self.module.params["enable_windows_to_unix_username_mapping"]
+ if input_win_flag is not None and \
+ (nas_win_flag is None or nas_win_flag != input_win_flag):
+ return True
+
+ # Default Windows User
+ if self.module.params["default_windows_user"] is not None and \
+ (nas_server_obj.default_windows_user is None or
+ self.module.params["default_windows_user"] !=
+ nas_server_obj.default_windows_user):
+ return True
+
+ # Default Unix User
+ if self.module.params["default_unix_user"] is not None and \
+ (nas_server_obj.default_unix_user is None or
+ self.module.params["default_unix_user"] !=
+ nas_server_obj.default_unix_user):
+ return True
+
+ return False
+
+ def update_nas_server(self, nas_server_obj, new_name=None,
+ default_unix_user=None, default_windows_user=None,
+ is_rep_dest=None, is_multiprotocol_enabled=None,
+ allow_unmapped_user=None, is_backup_only=None,
+ is_packet_reflect_enabled=None, current_uds=None,
+ enable_win_to_unix_name_map=None):
+ """
+ The Details of the NAS Server will be updated in the function.
+ """
+ try:
+ nas_server_obj.modify(
+ name=new_name,
+ is_replication_destination=is_rep_dest,
+ is_backup_only=is_backup_only,
+ is_multi_protocol_enabled=is_multiprotocol_enabled,
+ default_unix_user=default_unix_user,
+ default_windows_user=default_windows_user,
+ allow_unmapped_user=allow_unmapped_user,
+ is_packet_reflect_enabled=is_packet_reflect_enabled,
+ enable_windows_to_unix_username=enable_win_to_unix_name_map,
+ current_unix_directory_service=current_uds)
+
+ except Exception as e:
+ error_msg = "Failed to Update parameters of NAS Server" \
+ " %s with error %s" % (nas_server_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def modify_replication_session(self, nas_server_obj, repl_session, replication_params):
+ """ Modify the replication session
+ :param: nas_server_obj: NAS server object
+ :param: repl_session: Replication session to be modified
+ :param: replication_params: Module input params
+ :return: True if modification is successful
+ """
+ try:
+ LOG.info("Modifying replication session of nas server %s", nas_server_obj.name)
+ modify_payload = {}
+ rpo = None
+ if replication_params['replication_mode'] == 'manual':
+ rpo = -1
+ elif replication_params['rpo']:
+ rpo = replication_params['rpo']
+ name = repl_session.name
+ if replication_params['new_replication_name'] and \
+ name != replication_params['new_replication_name']:
+ name = replication_params['new_replication_name']
+
+ if repl_session.name != name:
+ modify_payload['name'] = name
+ if rpo is not None and \
+ repl_session.max_time_out_of_sync != rpo:
+ modify_payload['max_time_out_of_sync'] = rpo
+
+ if modify_payload:
+ repl_session.modify(**modify_payload)
+ return True
+
+ return False
+ except Exception as e:
+ errormsg = "Modifying replication session failed with error %s", e
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def enable_replication(self, nas_server_obj, replication, replication_reuse_resource):
+ """ Enable replication on NAS Server
+ :param: nas_server_obj: NAS Server object.
+ :param: replication: Dict which has all the replication parameter values.
+ :return: True if replication is enabled else False.
+ """
+ try:
+ # Validate replication params
+ self.validate_nas_server_replication_params(replication)
+ self.update_replication_params(replication, replication_reuse_resource)
+
+ repl_session = \
+ self.get_replication_session_on_filter(nas_server_obj, replication, "modify")
+ if repl_session:
+ return self.modify_replication_session(nas_server_obj, repl_session, replication)
+
+ self.validate_create_replication_params(replication)
+ replication_args_list = get_replication_args_list(replication)
+
+ # Get remote system
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ self.get_remote_system(replication, replication_args_list)
+
+ # Form parameters when replication_reuse_resource is False
+ if not replication_reuse_resource:
+ update_replication_arg_list(replication, replication_args_list, nas_server_obj)
+ nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list)
+ else:
+ replication_args_list['dst_nas_server_id'] = replication['destination_nas_server_id']
+ nas_server_obj.replicate(**replication_args_list)
+ return True
+
+ if 'replication_type' in replication and replication['replication_type'] == 'local':
+ update_replication_arg_list(replication, replication_args_list, nas_server_obj)
+ nas_server_obj.replicate_with_dst_resource_provisioning(**replication_args_list)
+ return True
+
+ except Exception as e:
+ errormsg = "Enabling replication to the nas server %s failed " \
+ "with error %s" % (nas_server_obj.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def disable_replication(self, obj_nas, replication_params):
+ """ Remove replication from the nas server
+ :param: replication_params: Module input params
+ :param: obj_nas: NAS Server object
+ :return: True if disabling replication is successful
+ """
+ try:
+ LOG.info(("Disabling replication on the nas server %s", obj_nas.name))
+ if replication_params:
+ self.update_replication_params(replication_params, False)
+ repl_session = \
+ self.get_replication_session_on_filter(obj_nas, replication_params, "delete")
+ if repl_session:
+ repl_session.delete()
+ return True
+ return False
+ except Exception as e:
+ errormsg = "Disabling replication on the nas server %s failed " \
+ "with error %s" % (obj_nas.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_replication_session_on_filter(self, obj_nas, replication_params, action):
+ """ Retrieves replication session on nas server
+ :param: obj_nas: NAS server object
+ :param: replication_params: Module input params
+ :param: action: Specifies action as modify or delete
+ :return: Replication session based on filter
+ """
+ if replication_params and replication_params['remote_system']:
+ repl_session = \
+ self.get_replication_session(obj_nas, filter_key="remote_system_name",
+ replication_params=replication_params)
+ elif replication_params and replication_params['replication_name']:
+ repl_session = \
+ self.get_replication_session(obj_nas, filter_key="name",
+ name=replication_params['replication_name'])
+ else:
+ repl_session = self.get_replication_session(obj_nas, action=action)
+ if repl_session and action and replication_params and \
+ replication_params['replication_type'] == 'local' and \
+ repl_session.remote_system.name != self.unity_conn.name:
+ return None
+ return repl_session
+
+ def get_replication_session(self, obj_nas, filter_key=None, replication_params=None, name=None, action=None):
+ """ Retrieves the replication sessions configured for the nas server
+ :param: obj_nas: NAS server object
+ :param: filter_key: Key to filter replication sessions
+ :param: replication_params: Module input params
+ :param: name: Replication session name
+ :param: action: Specifies modify or delete action on replication session
+ :return: Replication session details
+ """
+ try:
+ repl_session = self.unity_conn.get_replication_session(src_resource_id=obj_nas.id)
+ if not filter_key and repl_session:
+ if len(repl_session) > 1:
+ if action:
+ error_msg = 'There are multiple replication sessions for the nas server.'\
+ ' Please specify replication_name in replication_params to %s.' % action
+ self.module.fail_json(msg=error_msg)
+ return repl_session
+ return repl_session[0]
+ for session in repl_session:
+ if filter_key == 'remote_system_name' and \
+ session.remote_system.name == replication_params['remote_system_name']:
+ return session
+ if filter_key == 'name' and session.name == name:
+ return session
+ return None
+ except Exception as e:
+ errormsg = "Retrieving replication session on the nas server failed " \
+ "with error %s", str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_remote_system(self, replication, replication_args_list):
+ """ Fetch the remote system object by name and add it to the args list
+ :param: replication: Dict which has all the replication parameter values
+ :param: replication_args_list: Dict of args to be passed to the replicate call
+ """
+ remote_system_name = replication['remote_system_name']
+ remote_system_list = self.unity_conn.get_remote_system()
+ for remote_system in remote_system_list:
+ if remote_system.name == remote_system_name:
+ replication_args_list['remote_system'] = remote_system
+ break
+ if 'remote_system' not in replication_args_list:
+ errormsg = "Remote system %s is not found" % remote_system_name
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def update_replication_params(self, replication, replication_reuse_resource):
+ """ Update replication dict with remote system information
+ :param: replication: Dict which has all the replication parameter values
+ :param: replication_reuse_resource: Indicates whether to reuse an existing destination NAS server
+ :return: Updated replication Dict
+ """
+ try:
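+ # For 'remote' replication, open a second Unisphere connection to the
+ # destination system so that destination pool/NAS server names can be
+ # resolved to IDs on that system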
+ if 'replication_type' in replication and replication['replication_type'] == 'remote':
+ connection_params = {
+ 'unispherehost': replication['remote_system']['remote_system_host'],
+ 'username': replication['remote_system']['remote_system_username'],
+ 'password': replication['remote_system']['remote_system_password'],
+ 'validate_certs': replication['remote_system']['remote_system_verifycert'],
+ 'port': replication['remote_system']['remote_system_port']
+ }
+ remote_system_conn = utils.get_unity_unisphere_connection(
+ connection_params, application_type)
+ replication['remote_system_name'] = remote_system_conn.name
+ if replication['destination_pool_name'] is not None:
+ pool_object = remote_system_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+ if replication['destination_nas_server_name'] is not None and replication_reuse_resource:
+ nas_object = remote_system_conn.get_nas_server(name=replication['destination_nas_server_name'])
+ replication['destination_nas_server_id'] = nas_object.id
+ else:
+ replication['remote_system_name'] = self.unity_conn.name
+ if replication['destination_pool_name'] is not None:
+ pool_object = self.unity_conn.get_pool(name=replication['destination_pool_name'])
+ replication['destination_pool_id'] = pool_object.id
+ except Exception as e:
+ errormsg = "Updating replication params failed with error %s" % str(e)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_rpo(self, replication):
+ if 'replication_mode' in replication and replication['replication_mode'] == 'asynchronous' \
+ and replication['rpo'] is None:
+ errormsg = "rpo is required together with 'asynchronous' replication_mode."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
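+ # rpo must be between 5 and 1440 minutes unless replication_mode is
+ # 'manual' (or replication_mode is unset and rpo is -1, the value used
+ # to represent manual sync)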
+ if (replication['rpo'] and (replication['rpo'] < 5 or replication['rpo'] > 1440)) \
+ and (replication['replication_mode'] and replication['replication_mode'] != 'manual' or
+ not replication['replication_mode'] and replication['rpo'] != -1):
+ errormsg = "rpo value should be in range of 5 to 1440"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_nas_server_replication_params(self, replication):
+ """ Validate NAS server replication params
+ :param: replication: Dict which has all the replication parameter values
+ """
+
+ # Validate replication
+ if replication is None:
+ errormsg = "Please specify replication_params to enable replication."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ else:
+ # validate destination pool info
+ if replication['destination_pool_id'] is not None and replication['destination_pool_name'] is not None:
+ errormsg = "'destination_pool_id' and 'destination_pool_name' is mutually exclusive."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Validate replication mode
+ self.validate_rpo(replication)
+ # Validate replication type
+ if replication['replication_type'] == 'remote' and replication['remote_system'] is None:
+ errormsg = "Remote_system is required together with 'remote' replication_type"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ # Validate destination NAS server name
+ if 'destination_nas_server_name' in replication and replication['destination_nas_server_name'] is not None:
+ dst_nas_server_name_length = len(replication['destination_nas_server_name'])
+ if dst_nas_server_name_length == 0 or dst_nas_server_name_length > 95:
+ errormsg = "destination_nas_server_name length should be in range of 1 to 95"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_create_replication_params(self, replication):
+ ''' Validate replication params '''
+ if replication['destination_pool_id'] is None and replication['destination_pool_name'] is None:
+ errormsg = "Either 'destination_pool_id' or 'destination_pool_name' is required."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ keys = ['replication_mode', 'replication_type']
+ for key in keys:
+ if replication[key] is None:
+ errormsg = "Please specify %s to enable replication." % key
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on NAS Server based on user parameters
+ chosen in playbook
+ """
+ state = self.module.params['state']
+ nas_server_name = self.module.params['nas_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ nas_server_new_name = self.module.params['nas_server_new_name']
+ default_unix_user = self.module.params['default_unix_user']
+ default_windows_user = self.module.params['default_windows_user']
+
+ is_replication_destination = \
+ self.module.params['is_replication_destination']
+ is_multiprotocol_enabled = \
+ self.module.params['is_multiprotocol_enabled']
+ allow_unmapped_user = self.module.params['allow_unmapped_user']
+ enable_windows_to_unix_username_mapping = \
+ self.module.params['enable_windows_to_unix_username_mapping']
+
+ is_backup_only = self.module.params['is_backup_only']
+ is_packet_reflect_enabled = \
+ self.module.params['is_packet_reflect_enabled']
+
+ current_uds = self.module.params['current_unix_directory_service']
+ replication = self.module.params['replication_params']
+ replication_state = self.module.params['replication_state']
+ replication_reuse_resource = self.module.params['replication_reuse_resource']
+ # Get the enum for the corresponding current_unix_directory_service
+ if current_uds:
+ current_uds = \
+ self.get_current_uds_enum(current_uds)
+
+ changed = False
+
+ if replication and replication_state is None:
+ self.module.fail_json(msg="Please specify replication_state along with replication_params")
+
+ '''
+ Get details of NAS Server.
+ '''
+ nas_server_obj = None
+ if nas_server_name or nas_server_id:
+ nas_server_obj = self.get_nas_server(nas_server_name,
+ nas_server_id)
+
+ # Creation is not supported, so if the NAS Server does not exist
+ # and state is 'present', an error is thrown.
+ if not nas_server_obj and state == "present":
+ msg = "NAS Server Resource not found. Please enter a valid " \
+ "Name/ID to get or modify the parameters of nas server."
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ '''
+ Update the parameters of NAS Server
+ '''
+ if nas_server_obj and state == "present":
+ update_flag = self.to_update(nas_server_obj, current_uds)
+ if update_flag:
+ self.update_nas_server(
+ nas_server_obj, nas_server_new_name, default_unix_user,
+ default_windows_user, is_replication_destination,
+ is_multiprotocol_enabled, allow_unmapped_user,
+ is_backup_only, is_packet_reflect_enabled,
+ current_uds, enable_windows_to_unix_username_mapping)
+ changed = True
+
+ # Deletion is not supported, so if the NAS Server exists and
+ # state is 'absent', an error is thrown.
+ if nas_server_obj and state == 'absent':
+ self.module.fail_json(msg="Deletion of NAS Server is "
+ "currently not supported.")
+
+ if state == 'present' and nas_server_obj and replication_state is not None:
+ if replication_state == 'enable':
+ changed = self.enable_replication(nas_server_obj, replication, replication_reuse_resource)
+ else:
+ changed = self.disable_replication(nas_server_obj, replication)
+
+ '''
+ Update the changed state and NAS Server details
+ '''
+ nas_server_details = None
+ if nas_server_obj:
+ nas_server_details = self.get_nas_server(
+ None, nas_server_obj.id)._get_properties()
+
+ self.result["changed"] = changed
+ self.result["nas_server_details"] = nas_server_details
+ self.module.exit_json(**self.result)
+
+
+def get_nasserver_parameters():
+ """
+ This method provides parameters required for the ansible NAS Server
+ modules on Unity
+ """
+
+ return dict(
+ nas_server_name=dict(), nas_server_id=dict(),
+ nas_server_new_name=dict(),
+ default_unix_user=dict(),
+ default_windows_user=dict(),
+ current_unix_directory_service=dict(
+ choices=["NIS", "LDAP", "LOCAL_THEN_NIS",
+ "LOCAL_THEN_LDAP", "NONE", "LOCAL"]),
+ is_replication_destination=dict(type='bool'),
+ is_backup_only=dict(type='bool'),
+ is_multiprotocol_enabled=dict(type='bool'),
+ allow_unmapped_user=dict(type='bool'),
+ enable_windows_to_unix_username_mapping=dict(type='bool'),
+ is_packet_reflect_enabled=dict(type='bool'),
+ replication_params=dict(type='dict', options=dict(
+ destination_nas_server_name=dict(type='str'),
+ replication_mode=dict(type='str', choices=['asynchronous', 'manual']),
+ rpo=dict(type='int'),
+ replication_type=dict(type='str', choices=['local', 'remote']),
+ remote_system=dict(type='dict',
+ options=dict(
+ remote_system_host=dict(type='str', required=True, no_log=True),
+ remote_system_verifycert=dict(type='bool', required=False,
+ default=True),
+ remote_system_username=dict(type='str', required=True),
+ remote_system_password=dict(type='str', required=True, no_log=True),
+ remote_system_port=dict(type='int', required=False, default=443, no_log=True)
+ )),
+ destination_pool_name=dict(type='str'),
+ destination_pool_id=dict(type='str'),
+ destination_sp=dict(type='str', choices=['SPA', 'SPB']),
+ is_backup=dict(type='bool'),
+ replication_name=dict(type='str'),
+ new_replication_name=dict(type='str')
+ )),
+ replication_reuse_resource=dict(type='bool'),
+ replication_state=dict(type='str', choices=['enable', 'disable']),
+ state=dict(required=True, choices=['present', 'absent'], type='str')
+ )
+
+
+def get_sp_enum(destination_sp):
+ """Get the enum value for the given Storage Processor
+ :param: destination_sp: Storage Processor to be used in Destination NAS Server.
+ :return: enum value for Storage Processor.
+ """
+ return utils.NodeEnum[destination_sp]
+
+
+def get_replication_args_list(replication_params):
+ """Returns the replication args for payload"""
+ replication_args_list = {}
+
+ if replication_params['replication_name']:
+ replication_args_list['replication_name'] = replication_params['replication_name']
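+ # 'manual' replication_mode maps to max_time_out_of_sync = -1;
+ # 'asynchronous' uses the user-supplied rpo (in minutes)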
+ if 'replication_mode' in replication_params and \
+ replication_params['replication_mode'] == 'asynchronous':
+ replication_args_list['max_time_out_of_sync'] = replication_params['rpo']
+ else:
+ replication_args_list['max_time_out_of_sync'] = -1
+
+ return replication_args_list
+
+
+def update_replication_arg_list(replication, replication_args_list, nas_server_obj):
+ """ Update replication arg list
+ :param: replication: Dict which has all the replication parameter values
+ :param: replication_args_list: the existing list which should be updated
+ :param: nas_server_obj: NAS Server object on which replication is to be enabled
+ :return: None; replication_args_list is updated in place
+ """
+ if 'destination_sp' in replication and replication['destination_sp']:
+ dst_sp_enum = get_sp_enum(replication['destination_sp'])
+ replication_args_list['dst_sp'] = dst_sp_enum
+
+ replication_args_list['dst_pool_id'] = replication['destination_pool_id']
+
+ if 'is_backup' in replication and replication['is_backup']:
+ replication_args_list['is_backup_only'] = replication['is_backup']
+
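+ # For 'local' replication the destination NAS server name defaults to
+ # "DR_" + source name; for 'remote' it defaults to the source name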
+ if replication['replication_type'] == 'local':
+ replication_args_list['dst_nas_server_name'] = "DR_" + nas_server_obj.name
+ if 'destination_nas_server_name' in replication and replication['destination_nas_server_name'] is not None:
+ replication_args_list['dst_nas_server_name'] = replication['destination_nas_server_name']
+ else:
+ if replication['destination_nas_server_name'] is None:
+ replication_args_list['dst_nas_server_name'] = nas_server_obj.name
+
+
+def main():
+ """ Create Unity NAS Server object and perform action on it
+ based on user input from playbook"""
+ obj = NASServer()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfs.py b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
new file mode 100644
index 000000000..e6223066b
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
@@ -0,0 +1,1873 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing nfs export on Unity"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: nfs
+version_added: '1.1.0'
+short_description: Manage NFS export on Unity storage system
+description:
+- Managing NFS export on Unity storage system includes-
+ Create new NFS export,
+ Modify NFS export attributes,
+ Display NFS export details,
+ Delete NFS export.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Vivek Soni (@v-soni11) <ansible.team@dell.com>
+
+options:
+ nfs_export_name:
+ description:
+ - Name of the nfs export.
+ - Mandatory for create operation.
+ - Specify either I(nfs_export_name) or I(nfs_export_id) (but not both) for any
+ operation.
+ type: str
+ nfs_export_id:
+ description:
+ - ID of the nfs export.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ filesystem_name:
+ description:
+ - Name of the filesystem for which NFS export will be created.
+ - Either filesystem or snapshot is required for creation of the NFS export.
+ - If I(filesystem_name) is specified, then I(nas_server) is required to uniquely
+ identify the filesystem.
+ - If filesystem parameter is provided, then snapshot cannot be specified.
+ type: str
+ filesystem_id:
+ description:
+ - ID of the filesystem.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ snapshot_name:
+ description:
+ - Name of the snapshot for which NFS export will be created.
+ - Either filesystem or snapshot is required for creation of the NFS
+ export.
+ - If snapshot parameter is provided, then filesystem cannot be specified.
+ type: str
+ snapshot_id:
+ description:
+ - ID of the snapshot.
+ - This is a unique ID generated by Unity storage system.
+ type: str
+ nas_server_name:
+ description:
+ - Name of the NAS server on which filesystem will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which filesystem will be hosted.
+ type: str
+ path:
+ description:
+ - Local path to export relative to the NAS server root.
+ - With NFS, each export of a file_system or file_snap must have a unique
+ local path.
+ - Mandatory while creating NFS export.
+ type: str
+ description:
+ description:
+ - Description of the NFS export.
+ - Optional parameter when creating an NFS export.
+ - To modify description, pass the new value in I(description) field.
+ - To remove description, pass the empty value in I(description) field.
+ type: str
+ host_state:
+ description:
+ - Define whether the hosts can access the NFS export.
+ - Required when adding or removing access of hosts from the export.
+ type: str
+ choices: ['present-in-export', 'absent-in-export']
+ anonymous_uid:
+ description:
+ - Specifies the user ID of the anonymous account.
+ - If not specified at the time of creation, it will be set to 4294967294.
+ type: int
+ anonymous_gid:
+ description:
+ - Specifies the group ID of the anonymous account.
+ - If not specified at the time of creation, it will be set to 4294967294.
+ type: int
+ state:
+ description:
+ - State variable to determine whether NFS export will exist or not.
+ required: true
+ type: str
+ choices: ['absent', 'present']
+ default_access:
+ description:
+ - Default access level for all hosts that can access the NFS export.
+ - For hosts that need different access than the default,
+ they can be configured by adding to the list.
+ - If I(default_access) is not mentioned during creation, then NFS export will
+ be created with C(NO_ACCESS).
+ type: str
+ choices: ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT',
+ 'READ_ONLY_ROOT']
+ min_security:
+ description:
+ - NFS enforced security type for users accessing an NFS export.
+ - If not specified at the time of creation, it will be set to C(SYS).
+ type: str
+ choices: ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY',
+ 'KERBEROS_WITH_ENCRYPTION']
+ adv_host_mgmt_enabled:
+ description:
+ - If C(false), allows you to specify hosts without first having to register them.
+ - Mandatory while adding access hosts.
+ type: bool
+ no_access_hosts:
+ description:
+ - Hosts with no access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_only_hosts:
+ description:
+ - Hosts with read-only access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_only_root_hosts:
+ description:
+ - Hosts with read-only for root user access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_write_hosts:
+ description:
+ - Hosts with read and write access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+ read_write_root_hosts:
+ description:
+ - Hosts with read and write for root user access to the NFS export.
+ - List of dictionaries. Each dictionary will have any of the keys from
+ I(host_name), I(host_id), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(true) then the accepted keys are I(host_name), I(host_id) and I(ip_address).
+ - If I(adv_host_mgmt_enabled) is C(false) then the accepted keys are I(host_name), I(subnet), I(netgroup), I(domain) and I(ip_address).
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ ip_address:
+ description:
+ - IP address of the host.
+ type: str
+ subnet:
+ description:
+ - Subnet can be an 'IP address/netmask' or 'IP address/prefix length'.
+ type: str
+ netgroup:
+ description:
+ - Netgroup that is defined in NIS or the local netgroup file.
+ type: str
+ domain:
+ description:
+ - DNS domain, where all NFS clients in the domain are included in the host list.
+ type: str
+notes:
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create nfs export from filesystem
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ path: '/'
+ filesystem_id: "fs_377"
+ state: "present"
+
+- name: Create nfs export from snapshot
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_snap"
+ path: '/'
+ snapshot_name: "ansible_fs_snap"
+ state: "present"
+
+- name: Modify nfs export
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ nas_server_id: "nas_3"
+ description: ""
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: 4294967290
+ anonymous_uid: 4294967290
+ state: "present"
+
+- name: Add host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "present-in-export"
+ state: "present"
+
+- name: Remove host in nfs export with adv_host_mgmt_enabled as true
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "Host_1"
+ read_only_hosts:
+ - host_id: "Host_2"
+ read_only_root_hosts:
+ - host_name: "host_name1"
+ read_write_hosts:
+ - host_name: "host_name2"
+ read_write_root_hosts:
+ - ip_address: "1.1.1.1"
+ host_state: "absent-in-export"
+ state: "present"
+
+- name: Add host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "present-in-export"
+ state: "present"
+
+- name: Remove host in nfs export with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_from_fs"
+ filesystem_id: "fs_377"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "168.159.57.4/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.255.2.4"
+ host_state: "absent-in-export"
+ state: "present"
+
+- name: Get nfs details
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_id: "NFSShare_291"
+ state: "present"
+
+- name: Delete nfs export by nfs name
+ dellemc.unity.nfs:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nfs_export_name: "ansible_nfs_name"
+ nas_server_name: "ansible_nas_name"
+ state: "absent"
+"""
+
+RETURN = r"""
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: "false"
+
+nfs_share_details:
+ description: Details of the nfs export.
+ returned: When nfs export exists.
+ type: dict
+ contains:
+ anonymous_uid:
+ description: User ID of the anonymous account
+ type: int
+ anonymous_gid:
+ description: Group ID of the anonymous account
+ type: int
+ default_access:
+ description: Default access level for all hosts that can access export
+ type: str
+ description:
+ description: Description about the nfs export
+ type: str
+ id:
+ description: ID of the nfs export
+ type: str
+ min_security:
+ description: NFS enforced security type for users accessing an export
+ type: str
+ name:
+ description: Name of the nfs export
+ type: str
+ no_access_hosts_string:
+ description: Hosts with no access to the nfs export
+ type: str
+ read_only_hosts_string:
+ description: Hosts with read-only access to the nfs export
+ type: str
+ read_only_root_hosts_string:
+ description: Hosts with read-only for root user access to the nfs export
+ type: str
+ read_write_hosts_string:
+ description: Hosts with read and write access to the nfs export
+ type: str
+ read_write_root_hosts_string:
+ description: Hosts with read and write for root user access to export
+ type: str
+ type:
+ description: NFS export type. i.e. filesystem or snapshot
+ type: str
+ export_paths:
+ description: Export paths that can be used to mount and access export
+ type: list
+ filesystem:
+ description: Details of the filesystem on which nfs export is present
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: filesystem details
+ type: dict
+ contains:
+ id:
+ description: ID of the filesystem
+ type: str
+ name:
+ description: Name of the filesystem
+ type: str
+ nas_server:
+ description: Details of the nas server
+ type: dict
+ contains:
+ UnityNasServer:
+ description: NAS server details
+ type: dict
+ contains:
+ id:
+ description: ID of the nas server
+ type: str
+ name:
+ description: Name of the nas server
+ type: str
+ sample: {
+ 'anonymous_gid': 4294967294,
+ 'anonymous_uid': 4294967294,
+ 'creation_time': '2022-03-09 15:05:34.720000+00:00',
+ 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS',
+ 'description': '',
+ 'export_option': 1,
+ 'export_paths': [
+ '**.***.**.**:/dummy-share-123'
+ ],
+ 'filesystem': {
+ 'UnityFileSystem': {
+ 'id': 'fs_id_1',
+ 'name': 'fs_name_1'
+ }
+ },
+ 'host_accesses': None,
+ 'id': 'NFSShare_14393',
+ 'is_read_only': None,
+ 'min_security': 'NFSShareSecurityEnum.SYS',
+ 'modification_time': '2022-04-25 08:12:28.179000+00:00',
+ 'name': 'dummy-share-123',
+ 'nfs_owner_username': None,
+ 'no_access_hosts': None,
+ 'no_access_hosts_string': 'host1,**.***.*.*',
+ 'path': '/',
+ 'read_only_hosts': None,
+ 'read_only_hosts_string': '',
+ 'read_only_root_access_hosts': None,
+ 'read_only_root_hosts_string': '',
+ 'read_write_hosts': None,
+ 'read_write_hosts_string': '',
+ 'read_write_root_hosts_string': '',
+ 'role': 'NFSShareRoleEnum.PRODUCTION',
+ 'root_access_hosts': None,
+ 'snap': None,
+ 'type': 'NFSTypeEnum.NFS_SHARE',
+ 'existed': True,
+ 'nas_server': {
+ 'UnityNasServer': {
+ 'id': 'nas_id_1',
+ 'name': 'dummy_nas_server'
+ }
+ }
+ }
+"""
+
+import re
+import traceback
+
+try:
+ from ipaddress import ip_network, IPv4Network, IPv6Network
+ HAS_IPADDRESS, IP_ADDRESS_IMP_ERR = True, None
+except ImportError:
+ HAS_IPADDRESS, IP_ADDRESS_IMP_ERR = False, traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('nfs')
+
+DEFAULT_ACCESS_LIST = ['NO_ACCESS', 'READ_ONLY', 'READ_WRITE', 'ROOT',
+ 'READ_ONLY_ROOT']
+MIN_SECURITY_LIST = ['SYS', 'KERBEROS', 'KERBEROS_WITH_INTEGRITY',
+ 'KERBEROS_WITH_ENCRYPTION']
+HOST_DICT = dict(type='list', required=False, elements='dict',
+ options=dict(host_name=dict(),
+ host_id=dict(),
+ ip_address=dict(),
+ subnet=dict(),
+ netgroup=dict(),
+ domain=dict()))
+HOST_STATE_LIST = ['present-in-export', 'absent-in-export']
+STATE_LIST = ['present', 'absent']
+
+application_type = "Ansible/1.6.0"
+
+
+class NFS(object):
+ """Class with nfs export operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_nfs_parameters())
+
+ mutually_exclusive = [['nfs_export_id', 'nas_server_id'],
+ ['nfs_export_id', 'nas_server_name'],
+ ['filesystem_id', 'filesystem_name',
+ 'snapshot_id', 'snapshot_name'],
+ ['nas_server_id', 'nas_server_name']]
+ required_one_of = [['nfs_export_id', 'nfs_export_name']]
+
+ """ initialize the ansible module """
+ self.module = AnsibleModule(
+ argument_spec=self.module_params, supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ if not HAS_IPADDRESS:
+ self.module.fail_json(msg=missing_required_lib("ipaddress"),
+ exception=IP_ADDRESS_IMP_ERR)
+
+ self.unity = utils.get_unity_unisphere_connection(self.module.params,
+ application_type)
+ self.cli = self.unity._cli
+
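+ # Tri-state flag: True when the export targets a filesystem, False
+ # when it targets a snapshot, None when neither is given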
+ self.is_given_nfs_for_fs = None
+ if self.module.params['filesystem_name'] or \
+ self.module.params['filesystem_id']:
+ self.is_given_nfs_for_fs = True
+ elif self.module.params['snapshot_name'] or \
+ self.module.params['snapshot_id']:
+ self.is_given_nfs_for_fs = False
+
+ # Maps host input params to the *_string output params used by the SDK
+ self.host_param_mapping = {
+ 'no_access_hosts': 'no_access_hosts_string',
+ 'read_only_hosts': 'read_only_hosts_string',
+ 'read_only_root_hosts': 'read_only_root_hosts_string',
+ 'read_write_hosts': 'read_write_hosts_string',
+ 'read_write_root_hosts': 'read_write_root_hosts_string'
+ }
+
+ # default_access mapping. Keys are given by the user & values are
+ # accepted by the SDK
+ self.default_access = {'READ_ONLY_ROOT': 'RO_ROOT'}
+
+ LOG.info('Got the unity instance for provisioning on Unity')
+
+ def validate_host_access_data(self, host_dict):
+ """
+ Validate host access data
+ :param host_dict: Host access data
+ :return None
+ """
+ fqdn_pat = re.compile(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{0,62}'
+ r'[a-zA-Z0-9]\.)+[a-zA-Z]{2,63}$)')
+
+ if host_dict.get('host_name'):
+ version = get_ip_version(host_dict.get('host_name'))
+ if version in (4, 6):
+ msg = "IP4/IP6: %s given in host_name instead " \
+ "of name" % host_dict.get('host_name')
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if host_dict.get('ip_address'):
+ ip_or_fqdn = host_dict.get('ip_address')
+ version = get_ip_version(ip_or_fqdn)
+ # validate whether it is an FQDN
+ if version == 0 and not fqdn_pat.match(ip_or_fqdn):
+ msg = "%s is not a valid FQDN" % ip_or_fqdn
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if host_dict.get('subnet'):
+ subnet = host_dict.get('subnet')
+ subnet_info = subnet.split("/")
+ if len(subnet_info) != 2:
+ msg = "Subnet should be in format 'IP address/netmask' or 'IP address/prefix length'"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_adv_host_mgmt_enabled_check(self, host_dict):
+ """
+ Validate adv_host_mgmt_enabled check
+ :param host_dict: Host access data
+ :return None
+ """
+ host_dict_keys_set = set(host_dict.keys())
+ adv_host_mgmt_enabled_true_set = {'host_name', 'host_id', 'ip_address'}
+ adv_host_mgmt_enabled_false_set = {'host_name', 'subnet', 'domain', 'netgroup', 'ip_address'}
+ adv_host_mgmt_enabled_true_diff = host_dict_keys_set - adv_host_mgmt_enabled_true_set
+ adv_host_mgmt_enabled_false_diff = host_dict_keys_set - adv_host_mgmt_enabled_false_set
+ if self.module.params['adv_host_mgmt_enabled'] and adv_host_mgmt_enabled_true_diff != set():
+ msg = "If 'adv_host_mgmt_enabled' is true then host access should only have %s" % adv_host_mgmt_enabled_true_set
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ elif not self.module.params['adv_host_mgmt_enabled'] and adv_host_mgmt_enabled_false_diff != set():
+ msg = "If 'adv_host_mgmt_enabled' is false then host access should only have %s" % adv_host_mgmt_enabled_false_set
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_host_access_input_params(self):
+ """
+ Validate host access params
+ :return None
+ """
+ for param in list(self.host_param_mapping.keys()):
+ if self.module.params[param] and (not self.module.params[
+ 'host_state'] or self.module.params[
+ 'adv_host_mgmt_enabled'] is None):
+ msg = "'host_state' and 'adv_host_mgmt_enabled' is required along with: %s" % param
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ elif self.module.params[param]:
+ for host_dict in self.module.params[param]:
+ host_dict = {k: v for k, v in host_dict.items() if v}
+ self.validate_adv_host_mgmt_enabled_check(host_dict)
+ self.validate_host_access_data(host_dict)
+
+ def validate_module_attributes(self):
+ """
+ Validate module attributes
+ :return None
+ """
+ param_list = ['nfs_export_name', 'nfs_export_id', 'filesystem_name',
+ 'filesystem_id', 'nas_server_id',
+ 'snapshot_name', 'snapshot_id', 'path']
+
+ for param in param_list:
+ if self.module.params[param] and \
+ len(self.module.params[param].strip()) == 0:
+ msg = "Please provide valid value for: %s" % param
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_input(self):
+ """ Validate input parameters """
+
+ if self.module.params['nfs_export_name'] and \
+ not self.module.params['snapshot_name'] and \
+ not self.module.params['snapshot_id']:
+ if ((self.module.params['filesystem_name']) and
+ (not self.module.params['nas_server_id'] and
+ not self.module.params['nas_server_name'])):
+ msg = "Please provide nas server id or name along with " \
+ "filesystem name and nfs name"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if ((not self.module.params['nas_server_id']) and
+ (not self.module.params['nas_server_name']) and
+ (not self.module.params['filesystem_id'])):
+ msg = "Please provide either nas server id/name or " \
+ "filesystem id"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ self.validate_module_attributes()
+ self.validate_host_access_input_params()
+
+ def get_nfs_id_or_name(self):
+ """ Provide nfs_export_id or nfs_export_name user given value
+
+ :return: value provided by user in nfs_export_id/nfs_export_name
+ :rtype: str
+ """
+ if self.module.params['nfs_export_id']:
+ return self.module.params['nfs_export_id']
+ return self.module.params['nfs_export_name']
+
+ def get_nas_from_given_input(self):
+ """ Get nas server object
+
+ :return: nas server object
+ :rtype: UnityNasServer
+ """
+ LOG.info("Getting nas server details")
+ if not self.module.params['nas_server_id'] and not \
+ self.module.params['nas_server_name']:
+ return None
+ id_or_name = self.module.params['nas_server_id'] if \
+ self.module.params['nas_server_id'] else self.module.params[
+ 'nas_server_name']
+ try:
+ nas = self.unity.get_nas_server(
+ _id=self.module.params['nas_server_id'],
+ name=self.module.params['nas_server_name'])
+ except utils.UnityResourceNotFoundError as e:
+ # In case of incorrect name
+ msg = "Given nas server not found error: %s" % str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get nas server: %s due to incorrect " \
+ "username/password error: %s" % (id_or_name, str(e))
+ else:
+ msg = "Failed to get nas server: %s error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to get nas server: %s error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if nas and not nas.existed:
+ # In case of an incorrect id, the sdk returns a nas object whose
+ # attribute existed=False, instead of raising UnityResourceNotFoundError
+ msg = "NAS server does not exist. Please check the given nas details"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ LOG.info("Got nas server details")
+ return nas
+
+ def get_nfs_share(self, id=None, name=None):
+ """ Get the nfs export
+
+ :return: nfs_export object if nfs exists else None
+ :rtype: UnityNfsShare or None
+ """
+ try:
+ if not id and not name:
+ msg = "Please give nfs id/name"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ id_or_name = id if id else name
+ LOG.info("Getting nfs export: %s", id_or_name)
+ if id:
+ # Get nfs details from nfs ID
+ if self.is_given_nfs_for_fs:
+ nfs = self.unity.get_nfs_share(
+ _id=id, filesystem=self.fs_obj)
+ elif self.is_given_nfs_for_fs is False:
+ # nfs from snap
+ nfs = self.unity.get_nfs_share(_id=id, snap=self.snap_obj)
+ else:
+ nfs = self.unity.get_nfs_share(_id=id)
+ else:
+ # Get nfs details from nfs name
+ if self.is_given_nfs_for_fs:
+ nfs = self.unity.get_nfs_share(
+ name=name, filesystem=self.fs_obj)
+ elif self.is_given_nfs_for_fs is False:
+ # nfs from snap
+ nfs = self.unity.get_nfs_share(
+ name=name, snap=self.snap_obj)
+ else:
+ nfs = self.unity.get_nfs_share(name=name)
+
+ if isinstance(nfs, utils.UnityNfsShareList):
+ # This block will be executed, when we are trying to get nfs
+ # details using nfs name & nas server.
+ nfs_list = nfs
+ LOG.info("Multiple nfs export with same name: %s "
+ "found", id_or_name)
+ if self.nas_obj:
+ for n in nfs_list:
+ if n.filesystem.nas_server == self.nas_obj:
+ return n
+ msg = "Multiple nfs share with same name: %s found. " \
+ "Given nas server is not correct. Please check"
+ else:
+ msg = "Multiple nfs share with same name: %s found. " \
+ "Please give nas server"
+ else:
+ # nfs is instance of UnityNfsShare class
+ if nfs and nfs.existed:
+ if self.nas_obj and nfs.filesystem.nas_server != \
+ self.nas_obj:
+ msg = "nfs found but nas details given is incorrect"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ LOG.info("Successfully got nfs share for: %s", id_or_name)
+ return nfs
+ elif nfs and not nfs.existed:
+ # in case of incorrect id, sdk returns nfs object whose
+ # attribute existed=False
+ msg = "Please check incorrect nfs id is given"
+ else:
+ msg = "Failed to get nfs share: %s" % id_or_name
+ except utils.UnityResourceNotFoundError as e:
+ msg = "NFS share: %(id_or_name)s not found " \
+ "error: %(err)s" % {'id_or_name': id_or_name, 'err': str(e)}
+ LOG.info(str(msg))
+ return None
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get nfs share: %s due to incorrect " \
+ "username/password error: %s" % (id_or_name, str(e))
+ else:
+ msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
+ str(e))
+ except utils.StoropsConnectTimeoutError as e:
+ msg = "Failed to get nfs share: %s check unispherehost IP: %s " \
+ "error: %s" % (id_or_name,
+ self.module.params['nfs_export_id'], str(e))
+ except Exception as e:
+ msg = "Failed to get nfs share: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def delete_nfs_share(self, nfs_obj):
+ """ Delete nfs share
+
+ :param nfs_obj: NFS share obj
+ :type nfs_obj: UnityNfsShare
+ :return: None
+ """
+ try:
+ LOG.info("Deleting nfs share: %s", self.get_nfs_id_or_name())
+ nfs_obj.delete()
+ LOG.info("Deleted nfs share")
+ except Exception as e:
+ msg = "Failed to delete nfs share, error: %s" % str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_filesystem(self):
+ """ Get filesystem obj
+
+ :return: filesystem obj
+ :rtype: UnityFileSystem
+ """
+ if self.module.params['filesystem_id']:
+ id_or_name = self.module.params['filesystem_id']
+ elif self.module.params['filesystem_name']:
+ id_or_name = self.module.params['filesystem_name']
+ else:
+ msg = "Please provide filesystem ID/name, to get filesystem"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ try:
+ if self.module.params['filesystem_name']:
+ if not self.nas_obj:
+ err_msg = "NAS Server is required to get the filesystem"
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ LOG.info("Getting filesystem by name: %s", id_or_name)
+ fs_obj = self.unity.get_filesystem(
+ name=self.module.params['filesystem_name'],
+ nas_server=self.nas_obj)
+ elif self.module.params['filesystem_id']:
+ LOG.info("Getting filesystem by ID: %s", id_or_name)
+ fs_obj = self.unity.get_filesystem(
+ _id=self.module.params['filesystem_id'])
+ except utils.UnityResourceNotFoundError as e:
+ msg = "Filesystem: %s not found error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get filesystem due to incorrect " \
+ "username/password error: %s" % str(e)
+ else:
+ msg = "Failed to get filesystem error: %s" % str(e)
+ LOG.error(msg)
+ except Exception as e:
+ msg = "Failed to get filesystem: %s error: %s" % (
+ id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if fs_obj and fs_obj.existed:
+ LOG.info("Got the filesystem: %s", id_or_name)
+ return fs_obj
+ else:
+ msg = "Filesystem: %s does not exists" % id_or_name
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snapshot(self):
+ """ Get snapshot obj
+
+ :return: Snapshot obj
+ :rtype: UnitySnap
+ """
+ if self.module.params['snapshot_id']:
+ id_or_name = self.module.params['snapshot_id']
+ elif self.module.params['snapshot_name']:
+ id_or_name = self.module.params['snapshot_name']
+ else:
+ msg = "Please provide snapshot ID/name, to get snapshot"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ LOG.info("Getting snapshot: %s", id_or_name)
+ try:
+ if id_or_name:
+ snap_obj = self.unity.get_snap(
+ _id=self.module.params['snapshot_id'],
+ name=self.module.params['snapshot_name'])
+ else:
+ msg = "Failed to get the snapshot. Please provide snapshot " \
+ "details"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.UnityResourceNotFoundError as e:
+ msg = "Failed to get snapshot: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ except utils.HTTPClientError as e:
+ if e.http_status == 401:
+ msg = "Failed to get snapshot due to incorrect " \
+ "username/password error: %s" % str(e)
+ else:
+ msg = "Failed to get snapshot error: %s" % str(e)
+ LOG.error(msg)
+ except Exception as e:
+ msg = "Failed to get snapshot: %s error: %s" % (id_or_name,
+ str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if snap_obj and snap_obj.existed:
+ LOG.info("Successfully got the snapshot: %s", id_or_name)
+ return snap_obj
+ else:
+ msg = "Snapshot: %s does not exists" % id_or_name
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_host_obj(self, host_id=None, host_name=None, ip_address=None):
+ """
+ Get host object
+ :param host_id: ID of the host
+ :param host_name: Name of the host
+ :param ip_address: Network address of the host
+ :return: Host object
+ :rtype: object
+ """
+ try:
+ host_obj = None
+ host = None
+ if host_id:
+ host = host_id
+ host_obj = self.unity.get_host(_id=host_id)
+ elif host_name:
+ host = host_name
+ host_obj = self.unity.get_host(name=host_name)
+ elif ip_address:
+ host = ip_address
+ host_obj = self.unity.get_host(address=ip_address)
+
+ if host_obj and host_obj.existed:
+ LOG.info("Successfully got host: %s", host_obj.name)
+ return host_obj
+ else:
+ msg = f'Host: {host} does not exist'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = f'Failed to get host {host}, error: {e}'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_host_access_string_value(self, host_dict):
+ """
+ Form host access string
+ :param host_dict: Host access type info
+ :return: Host access data as a string
+ """
+ if host_dict.get("host_id"):
+ return self.get_host_obj(host_id=(host_dict.get("host_id"))).name + ','
+ elif host_dict.get("host_name"):
+ return host_dict.get(
+ "host_name") + ','
+ elif host_dict.get("ip_address"):
+ return host_dict.get(
+ "ip_address") + ','
+ elif host_dict.get("subnet"):
+ return host_dict.get(
+ "subnet") + ','
+ elif host_dict.get("domain"):
+ return "*." + host_dict.get(
+ "domain") + ','
+ elif host_dict.get("netgroup"):
+ return "@" + host_dict.get(
+ "netgroup") + ','
+
+ def get_host_obj_value(self, host_dict):
+ """
+ Form host access value using host object
+ :param host_dict: Host access type info
+ :return: Host object
+ """
+ if host_dict.get("host_id"):
+ return self.get_host_obj(host_id=host_dict.get("host_id"))
+ elif host_dict.get("host_name"):
+ return self.get_host_obj(host_name=host_dict.get("host_name"))
+ elif host_dict.get("ip_address"):
+ return self.get_host_obj(ip_address=host_dict.get("ip_address"))
+
+ def format_host_dict_for_adv_mgmt(self):
+ """
+ Form host access for advance management
+ :return: Formatted Host access type info
+ :rtype: dict
+ """
+ result_host = {}
+ for param in list(self.host_param_mapping.keys()):
+ if self.module.params[param]:
+ result_host[param] = []
+ for host_dict in self.module.params[param]:
+ result_host[param].append(self.get_host_obj_value(host_dict))
+
+ if 'read_only_root_hosts' in result_host:
+ result_host['read_only_root_access_hosts'] = result_host.pop('read_only_root_hosts')
+ if 'read_write_root_hosts' in result_host:
+ result_host['root_access_hosts'] = result_host.pop('read_write_root_hosts')
+ return result_host
+
+ def format_host_dict_for_non_adv_mgmt(self):
+ """
+ Form host access for non advance management option
+ :return: Formatted Host access type info
+ :rtype: dict
+ """
+ result_host = {}
+ for param in list(self.host_param_mapping.keys()):
+ if self.module.params[param]:
+ result_host[param] = ''
+ for host_dict in self.module.params[param]:
+ result_host[param] += self.get_host_access_string_value(host_dict)
+
+ if result_host != {}:
+ # Since we are supporting HOST STRING parameters instead of HOST
+ # parameters, change each given input HOST parameter name to its
+ # HOST STRING parameter name and strip the trailing ','
+ result_host = {self.host_param_mapping[k]: v[:-1] for k, v in result_host.items()}
+ return result_host
+
+ def get_host_dict_from_pb(self):
+ """ Traverse all given hosts params and provides with host dict,
+ which has respective host str param name with its value
+ required by SDK
+
+ :return: dict with key named as respective host str param name & value
+ required by SDK
+ :rtype: dict
+ """
+ LOG.info("Getting host parameters")
+ result_host = {}
+ if self.module.params['host_state']:
+ if not self.module.params['adv_host_mgmt_enabled']:
+ result_host = self.format_host_dict_for_non_adv_mgmt()
+ else:
+ result_host = self.format_host_dict_for_adv_mgmt()
+ return result_host
+
+ def get_adv_param_from_pb(self):
+ """ Provide all the advance parameters named as required by SDK
+
+ :return: all given advanced parameters
+ :rtype: dict
+ """
+ param = {}
+ LOG.info("Getting all given advance parameter")
+ host_dict = self.get_host_dict_from_pb()
+ if host_dict:
+ param.update(host_dict)
+
+ fields = ('description', 'anonymous_uid', 'anonymous_gid')
+ for field in fields:
+ if self.module.params[field] is not None:
+ param[field] = self.module.params[field]
+
+ if self.module.params['min_security'] and self.module.params[
+ 'min_security'] in utils.NFSShareSecurityEnum.__members__:
+ LOG.info("Getting min_security object from NFSShareSecurityEnum")
+ param['min_security'] = utils.NFSShareSecurityEnum[
+ self.module.params['min_security']]
+
+ if self.module.params['default_access']:
+ param['default_access'] = self.get_default_access()
+
+ LOG.info("Successfully got advance parameter: %s", param)
+ return param
+
+ def get_default_access(self):
+ LOG.info("Getting default_access object from "
+ "NFSShareDefaultAccessEnum")
+ default_access = self.default_access.get(
+ self.module.params['default_access'],
+ self.module.params['default_access'])
+ try:
+ return utils.NFSShareDefaultAccessEnum[default_access]
+ except KeyError as e:
+ msg = "default_access: %s not found error: %s" % (
+ default_access, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def correct_payload_as_per_sdk(self, payload, nfs_details=None):
+ """ Correct payload keys as required by SDK
+
+ :param payload: Payload used for create/modify operation
+ :type payload: dict
+ :param nfs_details: NFS details
+ :type nfs_details: dict
+ :return: Payload required by SDK
+ :rtype: dict
+ """
+ output_host_param = self.host_param_mapping.values()
+ if set(payload.keys()) & set(output_host_param):
+ if not nfs_details or (nfs_details and nfs_details['export_option'] != 1):
+ payload['export_option'] = 1
+ if 'read_write_root_hosts_string' in payload:
+ # SDK has a param named 'root_access_hosts_string' instead of
+ # 'read_write_root_hosts_string'
+ payload['root_access_hosts_string'] = payload.pop(
+ 'read_write_root_hosts_string')
+
+ return payload
+
+ def create_nfs_share_from_filesystem(self):
+ """ Create nfs share from given filesystem
+
+ :return: nfs_share object
+ :rtype: UnityNfsShare
+ """
+
+ name = self.module.params['nfs_export_name']
+ path = self.module.params['path']
+
+ if not name or not path:
+ msg = "Please provide name and path both for create"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ param = self.get_adv_param_from_pb()
+ if 'default_access' in param:
+ # create nfs from FILESYSTEM take 'share_access' as param in SDK
+ param['share_access'] = param.pop('default_access')
+ LOG.info("Param name: 'share_access' is used instead of "
+ "'default_access' in SDK so changed")
+
+ param = self.correct_payload_as_per_sdk(param)
+
+ LOG.info("Creating nfs share from filesystem with param: %s", param)
+ try:
+ nfs_obj = utils.UnityNfsShare.create(
+ cli=self.cli, name=name, fs=self.fs_obj, path=path, **param)
+ LOG.info("Successfully created nfs share: %s", nfs_obj)
+ return nfs_obj
+ except utils.UnityNfsShareNameExistedError as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
+ except Exception as e:
+ msg = "Failed to create nfs share: %s error: %s" % (name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def create_nfs_share_from_snapshot(self):
+ """ Create nfs share from given snapshot
+
+ :return: nfs_share object
+ :rtype: UnityNfsShare
+ """
+
+ name = self.module.params['nfs_export_name']
+ path = self.module.params['path']
+
+ if not name or not path:
+ msg = "Please provide name and path both for create"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ param = self.get_adv_param_from_pb()
+
+ param = self.correct_payload_as_per_sdk(param)
+
+ LOG.info("Creating nfs share from snap with param: %s", param)
+ try:
+ nfs_obj = utils.UnityNfsShare.create_from_snap(
+ cli=self.cli, name=name, snap=self.snap_obj, path=path, **param)
+ LOG.info("Successfully created nfs share: %s", nfs_obj)
+ return nfs_obj
+ except utils.UnityNfsShareNameExistedError as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
+ except Exception as e:
+ msg = "Failed to create nfs share: %s error: %s" % (name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def create_nfs_share(self):
+ """ Create nfs share from either filesystem/snapshot
+
+ :return: nfs_share object
+ :rtype: UnityNfsShare
+ """
+ if self.is_given_nfs_for_fs:
+ # Share to be created from filesystem
+ return self.create_nfs_share_from_filesystem()
+ elif self.is_given_nfs_for_fs is False:
+ # Share to be created from snapshot
+ return self.create_nfs_share_from_snapshot()
+ else:
+ msg = "Please provide filesystem or filesystem snapshot to create NFS export"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def convert_host_str_to_list(self, host_str):
+ """ Convert host_str which have comma separated hosts to host_list with
+ ip4/ip6 host obj if IP4/IP6 like string found
+
+ :param host_str: hosts str separated by comma
+ :return: hosts list, which may contains IP4/IP6 object if given in
+ host_str
+ :rtype: list
+ """
+ if not host_str:
+ LOG.debug("Empty host_str given")
+ return []
+
+ host_list = []
+ try:
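+ # strict=False lets entries with host bits set (e.g. 10.0.0.5/24)
+ # parse as networks instead of raising ValueError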
+ for h in host_str.split(","):
+ version = get_ip_version(h)
+ if version == 4:
+ h = u'{0}'.format(h)
+ h = IPv4Network(h, strict=False)
+ elif version == 6:
+ h = u'{0}'.format(h)
+ h = IPv6Network(h, strict=False)
+ host_list.append(h)
+ except Exception as e:
+ msg = "Error while converting host_str: %s to list error: %s" % (
+ host_str, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ return host_list
+
+ def add_host_dict_for_adv(self, existing_host_dict, new_host_dict):
+ """ Compares & adds up new hosts with the existing ones and provide
+ the final consolidated hosts for advance host management
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be added
+ :type new_host_dict: dict
+ :return: consolidated hosts params details which contains newly added
+ hosts along with the existing ones
+ :rtype: dict
+ """
+ modify_host_dict = {}
+ for host_access_key in existing_host_dict:
+ LOG.debug("Checking for param: %s", host_access_key)
+ new_host_obj_list = new_host_dict[host_access_key]
+ if new_host_obj_list and not existing_host_dict[host_access_key]:
+ # Existing nfs host is empty so directly add
+ # new_host_obj_list as it is
+ LOG.debug("Existing nfs host key: %s is empty, so lets add new host given value as it is", host_access_key)
+ modify_host_dict[host_access_key] = new_host_obj_list
+ continue
+
+ existing_host_obj_list = [self.get_host_obj(host_id=existing_host_dict['UnityHost']['id'])
+ for existing_host_dict in existing_host_dict[host_access_key]['UnityHostList']]
+
+ if not new_host_obj_list:
+ LOG.debug("Nothing to add as no host given")
+ continue
+
+ existing_set = set(host.id for host in existing_host_obj_list)
+ actual_to_add = [new_host for new_host in new_host_obj_list if new_host.id not in existing_set]
+
+ if not actual_to_add:
+ LOG.debug("All host given to be added is already added")
+ continue
+
+ # Lets extends actual_to_add list, which is new with existing
+ actual_to_add.extend(existing_host_obj_list)
+ modify_host_dict[host_access_key] = actual_to_add
+
+ return modify_host_dict
+
+ def add_host_dict_for_non_adv(self, existing_host_dict, new_host_dict):
+ """ Compares & adds up new hosts with the existing ones and provide
+ the final consolidated hosts for non advance host management
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be added
+ :type new_host_dict: dict
+ :return: consolidated hosts params details which contains newly added
+ hosts along with the existing ones
+ :rtype: dict
+ """
+ modify_host_dict = {}
+ for host_access_key in existing_host_dict:
+ LOG.debug("Checking add host for param: %s", host_access_key)
+ existing_host_str = existing_host_dict[host_access_key]
+ existing_host_list = self.convert_host_str_to_list(
+ existing_host_str)
+
+ new_host_str = new_host_dict[host_access_key]
+ new_host_list = self.convert_host_str_to_list(
+ new_host_str)
+
+ if not new_host_list:
+ LOG.debug("Nothing to add as no host given")
+ continue
+
+ if new_host_list and not existing_host_list:
+ # Existing nfs host is empty so lets directly add
+ # new_host_str as it is
+ LOG.debug("Existing nfs host key: %s is empty, so lets add new host given value as it is", host_access_key)
+ modify_host_dict[host_access_key] = new_host_str
+ continue
+
+ actual_to_add = list(set(new_host_list) - set(existing_host_list))
+ if not actual_to_add:
+ LOG.debug("All host given to be added is already added")
+ continue
+
+ # Lets extends actual_to_add list, which is new with existing
+ actual_to_add.extend(existing_host_list)
+
+ # Since SDK takes host_str as ',' separated instead of list, so
+ # lets convert str to list
+ # Note: explicity str() needed here to convert IP4/IP6 object
+ modify_host_dict[host_access_key] = ",".join(str(v) for v in actual_to_add)
+ return modify_host_dict
+
+ def remove_host_dict_for_adv(self, existing_host_dict, new_host_dict):
+ """ Compares & remove new hosts from the existing ones and provide
+ the remaining hosts for advance host management
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be removed
+ :type new_host_dict: dict
+ :return: existing hosts params details from which given new hosts are
+ removed
+ :rtype: dict
+ """
+ modify_host_dict = {}
+ for host_access_key in existing_host_dict:
+ LOG.debug("Checking host for param: %s", host_access_key)
+ if not existing_host_dict[host_access_key]:
+ # existing list is already empty, so nothing to remove
+ LOG.debug("Existing list is already empty, so nothing to remove")
+ continue
+
+ existing_host_obj_list = [self.get_host_obj(host_id=existing_host_dict['UnityHost']['id'])
+ for existing_host_dict in existing_host_dict[host_access_key]['UnityHostList']]
+ new_host_obj_list = new_host_dict[host_access_key]
+
+ if new_host_obj_list == []:
+ LOG.debug("Nothing to remove as no host given")
+ continue
+
+ unique_new_host_list = [new_host.id for new_host in new_host_obj_list]
+ if len(new_host_obj_list) > len(set(unique_new_host_list)):
+ msg = f'Duplicate host given: {unique_new_host_list} in host param: {host_access_key}'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ unique_existing_host_list = [host.id for host in existing_host_obj_list]
+ actual_to_remove = list(set(unique_new_host_list) & set(
+ unique_existing_host_list))
+ if not actual_to_remove:
+ continue
+
+ final_host_list = [existing_host for existing_host in existing_host_obj_list if existing_host.id not in unique_new_host_list]
+
+ modify_host_dict[host_access_key] = final_host_list
+
+ return modify_host_dict
+
+ def remove_host_dict_for_non_adv(self, existing_host_dict, new_host_dict):
+ """ Compares & remove new hosts from the existing ones and provide
+ the remaining hosts for non advance host management
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be removed
+ :type new_host_dict: dict
+ :return: existing hosts params details from which given new hosts are
+ removed
+ :rtype: dict
+ """
+ modify_host_dict = {}
+
+ for host_access_key in existing_host_dict:
+ LOG.debug("Checking remove host for param: %s", host_access_key)
+ existing_host_str = existing_host_dict[host_access_key]
+ existing_host_list = self.convert_host_str_to_list(
+ existing_host_str)
+
+ new_host_str = new_host_dict[host_access_key]
+ new_host_list = self.convert_host_str_to_list(
+ new_host_str)
+
+ if not new_host_list:
+ LOG.debug("Nothing to remove as no host given")
+ continue
+
+ if len(new_host_list) > len(set(new_host_list)):
+ msg = "Duplicate host given: %s in host param: %s" % (
+ new_host_list, host_access_key)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if new_host_list and not existing_host_list:
+ # existing list is already empty, so nothing to remove
+ LOG.debug("Existing list is already empty, so nothing to remove")
+ continue
+
+ actual_to_remove = list(set(new_host_list) & set(
+ existing_host_list))
+ if not actual_to_remove:
+ continue
+
+ final_host_list = list(set(existing_host_list) - set(
+ actual_to_remove))
+
+ # Since SDK takes host_str as ',' separated instead of list, so
+ # lets convert str to list
+ # Note: explicity str() needed here to convert IP4/IP6 object
+ modify_host_dict[host_access_key] = ",".join(str(v) for v in final_host_list)
+
+ return modify_host_dict
+
+ def add_host(self, existing_host_dict, new_host_dict):
+ """ Compares & adds up new hosts with the existing ones and provide
+ the final consolidated hosts
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be added
+ :type new_host_dict: dict
+ :return: consolidated hosts params details which contains newly added
+ hosts along with the existing ones
+ :rtype: dict
+ """
+ if self.module.params['adv_host_mgmt_enabled']:
+ modify_host_dict = self.add_host_dict_for_adv(existing_host_dict, new_host_dict)
+ else:
+ modify_host_dict = self.add_host_dict_for_non_adv(existing_host_dict, new_host_dict)
+
+ return modify_host_dict
+
+ def remove_host(self, existing_host_dict, new_host_dict):
+ """ Compares & remove new hosts from the existing ones and provide
+ the remaining hosts
+
+ :param existing_host_dict: All hosts params details which are
+ associated with existing nfs which to be modified
+ :type existing_host_dict: dict
+ :param new_host_dict: All hosts param details which are to be removed
+ :type new_host_dict: dict
+ :return: existing hosts params details from which given new hosts are
+ removed
+ :rtype: dict
+ """
+ if self.module.params['adv_host_mgmt_enabled']:
+ modify_host_dict = self.remove_host_dict_for_adv(existing_host_dict, new_host_dict)
+ else:
+ modify_host_dict = self.remove_host_dict_for_non_adv(existing_host_dict, new_host_dict)
+
+ return modify_host_dict
+
+ def modify_nfs_share(self, nfs_obj):
+ """ Modify given nfs share
+
+ :param nfs_obj: NFS share obj
+ :type nfs_obj: UnityNfsShare
+ :return: tuple(bool, nfs_obj)
+ - bool: indicates whether nfs_obj is modified or not
+ - nfs_obj: same nfs_obj if not modified else modified nfs_obj
+ :rtype: tuple
+ """
+ modify_param = {}
+ LOG.info("Modifying nfs share")
+
+ nfs_details = nfs_obj._get_properties()
+ fields = ('description', 'anonymous_uid', 'anonymous_gid')
+ for field in fields:
+ if self.module.params[field] is not None and \
+ self.module.params[field] != nfs_details[field]:
+ modify_param[field] = self.module.params[field]
+
+ if self.module.params['min_security'] and self.module.params[
+ 'min_security'] != nfs_obj.min_security.name:
+ modify_param['min_security'] = utils.NFSShareSecurityEnum[
+ self.module.params['min_security']]
+
+ if self.module.params['default_access']:
+ default_access = self.get_default_access()
+ if default_access != nfs_obj.default_access:
+ modify_param['default_access'] = default_access
+
+ new_host_dict = self.get_host_dict_from_pb()
+ if new_host_dict:
+ try:
+ if is_nfs_have_host_with_host_obj(nfs_details) and not self.module.params['adv_host_mgmt_enabled']:
+ msg = "Modification of nfs host is restricted using adv_host_mgmt_enabled as false since nfs " \
+ "already have host added using host obj"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ elif is_nfs_have_host_with_host_string(nfs_details) and self.module.params['adv_host_mgmt_enabled']:
+ msg = "Modification of nfs host is restricted using adv_host_mgmt_enabled as true since nfs " \
+ "already have host added without host obj"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ LOG.info("Extracting same given param from nfs")
+ existing_host_dict = {k: nfs_details[k] for k in new_host_dict}
+ except KeyError as e:
+ msg = "Failed to extract key-value from current nfs: %s" % \
+ str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ if self.module.params['host_state'] == HOST_STATE_LIST[0]:
+ # present-in-export
+ LOG.info("Getting host to be added")
+ modify_host_dict = self.add_host(existing_host_dict, new_host_dict)
+ else:
+ # absent-in-export
+ LOG.info("Getting host to be removed")
+ modify_host_dict = self.remove_host(existing_host_dict, new_host_dict)
+
+ if modify_host_dict:
+ modify_param.update(modify_host_dict)
+
+ if not modify_param:
+ LOG.info("Existing nfs attribute value is same as given input, "
+ "so returning same nfs object - idempotency case")
+ return False, nfs_obj
+
+ modify_param = self.correct_payload_as_per_sdk(
+ modify_param, nfs_details)
+
+ try:
+ resp = nfs_obj.modify(**modify_param)
+ resp.raise_if_err()
+ except Exception as e:
+ msg = "Failed to modify nfs error: %s" % str(e)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ return True, self.get_nfs_share(id=nfs_obj.id)
+
+ def perform_module_operation(self):
+ """ Perform different actions on nfs based on user parameter
+ chosen in playbook """
+
+ changed = False
+ nfs_share_details = {}
+
+ self.validate_input()
+
+ self.nas_obj = None
+ if self.module.params['nas_server_id'] or self.module.params[
+ 'nas_server_name']:
+ self.nas_obj = self.get_nas_from_given_input()
+
+ self.fs_obj = None
+ self.snap_obj = None
+ if self.is_given_nfs_for_fs:
+ self.fs_obj = self.get_filesystem()
+ elif self.is_given_nfs_for_fs is False:
+ self.snap_obj = self.get_snapshot()
+
+ # Get nfs Share
+ nfs_obj = self.get_nfs_share(
+ id=self.module.params['nfs_export_id'],
+ name=self.module.params['nfs_export_name']
+ )
+
+ # Delete nfs Share
+ if self.module.params['state'] == STATE_LIST[1]:
+ if nfs_obj:
+ # delete_nfs_share() does not return any value
+ # In case of successful delete, lets nfs_obj set None
+ # to avoid fetching and displaying attribute
+ nfs_obj = self.delete_nfs_share(nfs_obj)
+ changed = True
+ elif not nfs_obj:
+ # create
+ nfs_obj = self.create_nfs_share()
+ changed = True
+ else:
+ # modify
+ changed, nfs_obj = self.modify_nfs_share(nfs_obj)
+
+ # Get display attributes
+ if self.module.params['state'] and nfs_obj:
+ nfs_share_details = get_nfs_share_display_attrs(nfs_obj)
+
+ result = {"changed": changed,
+ "nfs_share_details": nfs_share_details}
+ self.module.exit_json(**result)
+
+
+def get_nfs_share_display_attrs(nfs_obj):
+ """ Provide nfs share attributes for display
+
+ :param nfs: NFS share obj
+ :type nfs: UnityNfsShare
+ :return: nfs_share_details
+ :rtype: dict
+ """
+ LOG.info("Getting nfs share details from nfs share object")
+ nfs_share_details = nfs_obj._get_properties()
+
+ # Adding filesystem_name to nfs_share_details
+ LOG.info("Updating filesystem details")
+ nfs_share_details['filesystem']['UnityFileSystem']['name'] = \
+ nfs_obj.filesystem.name
+ if 'id' not in nfs_share_details['filesystem']['UnityFileSystem']:
+ nfs_share_details['filesystem']['UnityFileSystem']['id'] = \
+ nfs_obj.filesystem.id
+
+ # Adding nas server details
+ LOG.info("Updating nas server details")
+ nas_details = nfs_obj.filesystem._get_properties()['nas_server']
+ nas_details['UnityNasServer']['name'] = \
+ nfs_obj.filesystem.nas_server.name
+ nfs_share_details['nas_server'] = nas_details
+
+ # Adding snap.id & snap.name if nfs_obj is for snap
+ if is_nfs_obj_for_snap(nfs_obj):
+ LOG.info("Updating snap details")
+ nfs_share_details['snap']['UnitySnap']['id'] = nfs_obj.snap.id
+ nfs_share_details['snap']['UnitySnap']['name'] = nfs_obj.snap.name
+
+ LOG.info("Successfully updated nfs share details")
+ return nfs_share_details
+
+
+def is_nfs_have_host_with_host_obj(nfs_details):
+ """ Check whether nfs host is already added using host obj
+
+ :param nfs_details: nfs details
+ :return: True if nfs have host already added with host obj else False
+ :rtype: bool
+ """
+ host_obj_params = ('no_access_hosts', 'read_only_hosts',
+ 'read_only_root_access_hosts', 'read_write_hosts',
+ 'root_access_hosts')
+ for host_obj_param in host_obj_params:
+ if nfs_details.get(host_obj_param):
+ return True
+ return False
+
+
+def is_nfs_have_host_with_host_string(nfs_details):
+ """ Check whether nfs host is already added using host by string method
+
+ :param nfs_details: nfs details
+ :return: True if nfs have host already added with host string method else False
+ :rtype: bool
+ """
+ host_obj_params = (
+ 'no_access_hosts_string',
+ 'read_only_hosts_string',
+ 'read_only_root_hosts_string',
+ 'read_write_hosts_string',
+ 'read_write_root_hosts_string'
+ )
+ for host_obj_param in host_obj_params:
+ if nfs_details.get(host_obj_param):
+ return True
+ return False
+
+
+def get_ip_version(val):
+ try:
+ val = u'{0}'.format(val)
+ ip = ip_network(val, strict=False)
+ return ip.version
+ except ValueError:
+ return 0
+
+
+def is_nfs_obj_for_fs(nfs_obj):
+ """ Check whether the nfs_obj if for filesystem
+
+ :param nfs_obj: NFS share object
+ :return: True if nfs_obj is of filesystem type
+ :rtype: bool
+ """
+ if nfs_obj.type == utils.NFSTypeEnum.NFS_SHARE:
+ return True
+ return False
+
+
+def is_nfs_obj_for_snap(nfs_obj):
+ """ Check whether the nfs_obj if for snapshot
+
+ :param nfs_obj: NFS share object
+ :return: True if nfs_obj is of snapshot type
+ :rtype: bool
+ """
+ if nfs_obj.type == utils.NFSTypeEnum.NFS_SNAPSHOT:
+ return True
+ return False
+
+
+def get_nfs_parameters():
+ """ Provides parameters required for the NFS share module on Unity """
+
+ return dict(
+ nfs_export_name=dict(required=False, type='str'),
+ nfs_export_id=dict(required=False, type='str'),
+ filesystem_id=dict(required=False, type='str'),
+ filesystem_name=dict(required=False, type='str'),
+ snapshot_id=dict(required=False, type='str'),
+ snapshot_name=dict(required=False, type='str'),
+ nas_server_id=dict(required=False, type='str'),
+ nas_server_name=dict(required=False, type='str'),
+ path=dict(required=False, type='str', no_log=True),
+ description=dict(required=False, type='str'),
+ default_access=dict(required=False, type='str',
+ choices=DEFAULT_ACCESS_LIST),
+ min_security=dict(required=False, type='str',
+ choices=MIN_SECURITY_LIST),
+ adv_host_mgmt_enabled=dict(required=False, type='bool', default=None),
+ no_access_hosts=HOST_DICT,
+ read_only_hosts=HOST_DICT,
+ read_only_root_hosts=HOST_DICT,
+ read_write_hosts=HOST_DICT,
+ read_write_root_hosts=HOST_DICT,
+ host_state=dict(required=False, type='str', choices=HOST_STATE_LIST),
+ anonymous_uid=dict(required=False, type='int'),
+ anonymous_gid=dict(required=False, type='int'),
+ state=dict(required=True, type='str', choices=STATE_LIST)
+ )
+
+
+def main():
+ """ Create UnityNFS object and perform action on it
+ based on user input from playbook"""
+ obj = NFS()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
new file mode 100644
index 000000000..e492e3af0
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
@@ -0,0 +1,494 @@
+#!/usr/bin/python
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing NFS server on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: nfsserver
+version_added: '1.4.0'
+short_description: Manage NFS server on Unity storage system
+description:
+- Managing the NFS server on the Unity storage system includes creating NFS server, getting NFS server details
+ and deleting NFS server attributes.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Meenakshi Dembi (@dembim) <ansible.team@dell.com>
+
+options:
+ nas_server_name:
+ description:
+ - Name of the NAS server on which NFS server will be hosted.
+ type: str
+ nas_server_id:
+ description:
+ - ID of the NAS server on which NFS server will be hosted.
+ type: str
+ nfs_server_id:
+ description:
+ - ID of the NFS server.
+ type: str
+ host_name:
+ description:
+ - Host name of the NFS server.
+ type: str
+ nfs_v4_enabled:
+ description:
+ - Indicates whether the NFSv4 is enabled on the NAS server.
+ type: bool
+ is_secure_enabled:
+ description:
+ - Indicates whether the secure NFS is enabled.
+ type: bool
+ kerberos_domain_controller_type:
+ description:
+ - Type of Kerberos Domain Controller used for secure NFS service.
+ choices: [CUSTOM, UNIX, WINDOWS]
+ type: str
+ kerberos_domain_controller_username:
+ description:
+ - Kerberos Domain Controller administrator username.
+ type: str
+ kerberos_domain_controller_password:
+ description:
+ - Kerberos Domain Controller administrator password.
+ type: str
+ is_extended_credentials_enabled:
+ description:
+ - Indicates whether support for more than 16 unix groups in a Unix credential.
+ type: bool
+ remove_spn_from_kerberos:
+ description:
+ - Indicates whether to remove the SPN from Kerberos Domain Controller.
+ default: true
+ type: bool
+ state:
+ description:
+ - Define whether the NFS server should exist or not.
+ choices: [absent, present]
+ required: true
+ type: str
+notes:
+- The I(check_mode) is supported.
+- Modify operation for NFS Server is not supported.
+- When I(kerberos_domain_controller_type) is C(UNIX), I(kdc_type) in I(nfs_server_details) output is displayed as C(null).
+'''
+
+EXAMPLES = r'''
+
+ - name: Create NFS server with kdctype as Windows
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "WINDOWS"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Create NFS server with kdctype as Unix
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: True
+ kerberos_domain_controller_type: "UNIX"
+ is_extended_credentials_enabled: True
+ nfs_v4_enabled: True
+ state: "present"
+
+ - name: Get NFS server details
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ state: "present"
+
+ - name: Delete NFS server
+ dellemc.unity.nfsserver:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ nas_server_name: "dummy_nas"
+ kerberos_domain_controller_username: "administrator"
+ kerberos_domain_controller_password: "Password123!"
+ unjoin_server_account: False
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+nfs_server_details:
+ description: Details of the NFS server.
+ returned: When NFS server exists
+ type: dict
+ contains:
+ credentials_cache_ttl:
+ description: Credential cache refresh timeout. Resolution is in minutes. Default value is 15 minutes.
+ type: str
+ existed:
+ description: Indicates if NFS Server exists.
+ type: bool
+ host_name:
+ description: Host name of the NFS server.
+ type: str
+ id:
+ description: Unique identifier of the NFS Server instance.
+ type: str
+ is_extended_credentials_enabled:
+ description: Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential.
+ type: bool
+ is_secure_enabled:
+ description: Indicates whether secure NFS is enabled on the NFS server.
+ type: bool
+ kdc_type:
+ description: Type of Kerberos Domain Controller used for secure NFS service.
+ type: str
+ nfs_v4_enabled:
+ description: Indicates whether NFSv4 is enabled on the NAS server.
+ type: bool
+ servicee_principal_name:
+ description: The Service Principal Name (SPN) for the NFS Server.
+ type: str
+ sample: {
+ "credentials_cache_ttl": "0:15:00",
+ "existed": true,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8778980109421,
+ "id": "if_37"
+ }
+ }
+ ]
+ },
+ "hash": 8778980109388,
+ "host_name": "dummy_nas23.pie.lab.emc.com",
+ "id": "nfs_51",
+ "is_extended_credentials_enabled": true,
+ "is_secure_enabled": true,
+ "kdc_type": "KdcTypeEnum.WINDOWS",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8778980109412
+ }
+ },
+ "nfs_v4_enabled": true,
+ "servicee_principal_name": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('nfsserver')
+
+application_type = "Ansible/1.6.0"
+
+
+class NFSServer(object):
+ """Class with NFS server operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_nfs_server_parameters())
+
+ mutually_exclusive = [['nas_server_name', 'nas_server_id']]
+ required_one_of = [['nfs_server_id', 'nas_server_name', 'nas_server_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ LOG.info('Check Mode Flag %s', self.module.check_mode)
+
+ def get_nfs_server_details(self, nfs_server_id=None, nas_server_id=None):
+ """Get NFS server details.
+ :param: nfs_server_id: The ID of the NFS server
+ :param: nas_server_id: The name of the NAS server
+ :return: Dict containing NFS server details if exists
+ """
+ LOG.info("Getting NFS server details")
+ try:
+ if nfs_server_id:
+ nfs_server_details = self.unity_conn.get_nfs_server(_id=nfs_server_id)
+ return nfs_server_details._get_properties()
+ elif nas_server_id:
+ nfs_server_details = self.unity_conn.get_nfs_server(nas_server=nas_server_id)
+ if len(nfs_server_details) > 0:
+ return process_dict(nfs_server_details._get_properties())
+ return None
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ msg = 'Incorrect username or password provided.'
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ err_msg = "Failed to get details of NFS Server" \
+ " with error {0}".format(str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = "Failed to get details of NFS Server" \
+ " with error {0}".format(str(e))
+ LOG.error(err_msg)
+ return None
+
+ def get_nfs_server_instance(self, nfs_server_id):
+ """Get NFS server instance.
+ :param: nfs_server_id: The ID of the NFS server
+ :return: Return NFS server instance if exists
+ """
+
+ try:
+ nfs_server_obj = self.unity_conn.get_nfs_server(_id=nfs_server_id)
+ return nfs_server_obj
+ except Exception as e:
+ error_msg = "Failed to get the NFS server %s instance" \
+ " with error %s" % (nfs_server_id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def delete_nfs_server(self, nfs_server_id, skip_unjoin=None, domain_username=None, domain_password=None):
+ """Delete NFS server.
+ :param: nfs_server_id: The ID of the NFS server
+ :param: skip_unjoin: Flag indicating whether to unjoin SMB server account from AD before deletion
+ :param: domain_username: The domain username
+ :param: domain_password: The domain password
+ :return: Return True if NFS server is deleted
+ """
+
+ LOG.info("Deleting NFS server")
+ try:
+ if not self.module.check_mode:
+ nfs_obj = self.get_nfs_server_instance(nfs_server_id=nfs_server_id)
+ nfs_obj.delete(skip_kdc_unjoin=skip_unjoin, username=domain_username, password=domain_password)
+ return True
+ except Exception as e:
+ msg = "Failed to delete NFS server: %s with error: %s" % (nfs_server_id, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_nas_server_id(self, nas_server_name):
+ """Get NAS server ID.
+ :param: nas_server_name: The name of NAS server
+ :return: Return NAS server ID if exists
+ """
+
+ LOG.info("Getting NAS server ID")
+ try:
+ obj_nas = self.unity_conn.get_nas_server(name=nas_server_name)
+ return obj_nas.get_id()
+ except Exception as e:
+ msg = "Failed to get details of NAS server: %s with error: %s" % (nas_server_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def is_modification_required(self, is_extended_credentials_enabled, nfs_server_details):
+ """Check if modification is required in existing NFS server
+ :param: is_extended_credentials_enabled: Indicates whether the NFS server supports more than 16 Unix groups in a Unix credential.
+ :param: nfs_server_details: NFS server details
+ :return: True if modification is required
+ """
+
+ LOG.info("Checking if any modification is required")
+ # Check for Extend Credential
+ if is_extended_credentials_enabled is not None and \
+ is_extended_credentials_enabled != nfs_server_details['is_extended_credentials_enabled']:
+ return True
+
+ def create_nfs_server(self, nas_server_id, host_name=None, nfs_v4_enabled=None, is_secure_enabled=None,
+ kerberos_domain_controller_type=None, kerberos_domain_controller_username=None,
+ kerberos_domain_controller_password=None, is_extended_credentials_enabled=None):
+ """Create NFS server.
+ :param: nas_server_id: The ID of NAS server.
+ :param: host_name: Name of NFS Server.
+ :param: nfs_v4_enabled: Indicates whether the NFSv4 is enabled on the NAS server.
+ :param: is_secure_enabled: Indicates whether the secure NFS is enabled.
+ :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller used for secure NFS service.
+ :param: kerberos_domain_controller_username: Kerberos Domain Controller administrator username.
+ :param: kerberos_domain_controller_password: Kerberos Domain Controller administrator password.
+ :param: is_extended_credentials_enabled: Indicates whether support for more than 16 unix groups in a Unix credential.
+ """
+
+ LOG.info("Creating NFS server")
+ try:
+ if not self.module.check_mode:
+ kdc_enum_type = get_enum_kdctype(kerberos_domain_controller_type)
+ if kerberos_domain_controller_type == "UNIX":
+ is_extended_credentials_enabled = None
+ is_secure_enabled = None
+ utils.UnityNfsServer.create(cli=self.unity_conn._cli, nas_server=nas_server_id, host_name=host_name,
+ nfs_v4_enabled=nfs_v4_enabled,
+ is_secure_enabled=is_secure_enabled, kdc_type=kdc_enum_type,
+ kdc_username=kerberos_domain_controller_username,
+ kdc_password=kerberos_domain_controller_password,
+ is_extended_credentials_enabled=is_extended_credentials_enabled)
+ return True
+ except Exception as e:
+ msg = "Failed to create NFS server with on NAS Server %s with error: %s" % (nas_server_id, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_input_params(self):
+ param_list = ["nfs_server_id", "nas_server_id", "nas_server_name", "host_name", "kerberos_domain_controller_username",
+ "kerberos_domain_controller_password"]
+
+ for param in param_list:
+ msg = "Please provide valid value for: %s" % param
+ if self.module.params[param] is not None and len(self.module.params[param].strip()) == 0:
+ errmsg = msg.format(param)
+ self.module.fail_json(msg=errmsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on NFS server module based on parameters
+ passed in the playbook
+ """
+ nfs_server_id = self.module.params['nfs_server_id']
+ nas_server_id = self.module.params['nas_server_id']
+ nas_server_name = self.module.params['nas_server_name']
+ host_name = self.module.params['host_name']
+ nfs_v4_enabled = self.module.params['nfs_v4_enabled']
+ is_secure_enabled = self.module.params['is_secure_enabled']
+ kerberos_domain_controller_type = self.module.params['kerberos_domain_controller_type']
+ kerberos_domain_controller_username = self.module.params['kerberos_domain_controller_username']
+ kerberos_domain_controller_password = self.module.params['kerberos_domain_controller_password']
+ is_extended_credentials_enabled = self.module.params['is_extended_credentials_enabled']
+ remove_spn_from_kerberos = self.module.params['remove_spn_from_kerberos']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and NFS server details
+ result = dict(
+ changed=False,
+ nfs_server_details={}
+ )
+
+ modify_flag = False
+
+ self.validate_input_params()
+
+ if nas_server_name:
+ nas_server_id = self.get_nas_server_id(nas_server_name)
+
+ nfs_server_details = self.get_nfs_server_details(nfs_server_id=nfs_server_id,
+ nas_server_id=nas_server_id)
+
+ # Check if modification is required
+ if nfs_server_details and state == 'present':
+ modify_flag = self.is_modification_required(is_extended_credentials_enabled, nfs_server_details)
+ if modify_flag:
+ self.module.fail_json(msg="Modification of NFS Server parameters is not supported through Ansible module")
+
+ if not nfs_server_details and state == 'present':
+ if not nas_server_id:
+ self.module.fail_json(msg="Please provide nas server id/name to create NFS server.")
+
+ result['changed'] = self.create_nfs_server(nas_server_id, host_name, nfs_v4_enabled,
+ is_secure_enabled, kerberos_domain_controller_type,
+ kerberos_domain_controller_username,
+ kerberos_domain_controller_password,
+ is_extended_credentials_enabled)
+
+ if state == 'absent' and nfs_server_details:
+ skip_unjoin = not remove_spn_from_kerberos
+ result['changed'] = self.delete_nfs_server(nfs_server_details["id"], skip_unjoin,
+ kerberos_domain_controller_username,
+ kerberos_domain_controller_password)
+
+ if state == 'present':
+ result['nfs_server_details'] = self.get_nfs_server_details(nfs_server_id=nfs_server_id,
+ nas_server_id=nas_server_id)
+ self.module.exit_json(**result)
+
+
+def get_nfs_server_parameters():
+ """This method provide parameters required for the ansible
+ NFS server module on Unity"""
+ return dict(
+ nfs_server_id=dict(type='str'),
+ host_name=dict(type='str'),
+ nfs_v4_enabled=dict(type='bool'),
+ is_secure_enabled=dict(type='bool'),
+ kerberos_domain_controller_type=dict(type='str', choices=['UNIX', 'WINDOWS', 'CUSTOM']),
+ kerberos_domain_controller_username=dict(type='str'),
+ kerberos_domain_controller_password=dict(type='str', no_log=True),
+ nas_server_name=dict(type='str'),
+ nas_server_id=dict(type='str'),
+ is_extended_credentials_enabled=dict(type='bool'),
+ remove_spn_from_kerberos=dict(default=True, type='bool'),
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ )
+
+
+def get_enum_kdctype(kerberos_domain_controller_type):
+ """Getting correct enum values for kerberos_domain_controller_type
+ :param: kerberos_domain_controller_type: Type of Kerberos Domain Controller used for secure NFS service.
+ :return: enum value for kerberos_domain_controller_type.
+ """
+
+ if utils.KdcTypeEnum[kerberos_domain_controller_type]:
+ kerberos_domain_controller_type = utils.KdcTypeEnum[kerberos_domain_controller_type]
+ return kerberos_domain_controller_type
+
+
+def process_dict(nfs_server_details):
+ """Process NFS server details.
+ :param: nfs_server_details: Dict containing NFS server details
+ :return: Processed dict containing NFS server details
+ """
+ param_list = ['credentials_cache_ttl', 'file_interfaces', 'host_name', 'id', 'kdc_type', 'nas_server', 'is_secure_enabled',
+ 'is_extended_credentials_enabled', 'nfs_v4_enabled', 'servicee_principal_name']
+
+ for param in param_list:
+ if param in nfs_server_details and param == 'credentials_cache_ttl':
+ nfs_server_details[param] = str(nfs_server_details[param][0])
+ else:
+ nfs_server_details[param] = nfs_server_details[param][0]
+ return nfs_server_details
+
+
+def main():
+ """Create Unity NFS server object and perform action on it
+ based on user input from playbook"""
+ obj = NFSServer()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/smbshare.py b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
new file mode 100644
index 000000000..58bc8c709
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: smbshare
+version_added: '1.1.0'
+short_description: Manage SMB shares on Unity storage system
+extends_documentation_fragment:
+- dellemc.unity.unity
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+description:
+- Managing SMB Shares on Unity storage system includes create, get,
+ modify, and delete the smb shares.
+options:
+ share_name:
+ description:
+ - Name of the SMB share.
+ - Required during creation of the SMB share.
+ - For all other operations either I(share_name) or I(share_id) is required.
+ type: str
+ share_id:
+ description:
+ - ID of the SMB share.
+ - Should not be specified during creation. Id is auto generated.
+ - For all other operations either I(share_name) or I(share_id) is required.
+ - If I(share_id) is used then no need to pass nas_server/filesystem/snapshot/path.
+ type: str
+ path:
+ description:
+ - Local path to the file system/Snapshot or any existing sub-folder of
+ the file system/Snapshot that is shared over the network.
+ - Path is relative to the root of the filesystem.
+ - Required for creation of the SMB share.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the File System.
+ - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem.
+ - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the filesystem.
+ - Options I(filesystem_name) and I(filesystem_id) are mutually exclusive parameters.
+ type: str
+ snapshot_id:
+ description:
+ - The ID of the Filesystem Snapshot.
+ - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot.
+ - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the snapshot.
+ - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS Server.
+ - It is not required if I(share_id) is used.
+ type: str
+ filesystem_name:
+ description:
+ - The Name of the File System.
+ - Either I(filesystem_name) or I(filesystem_id) is required for creation of the SMB share for filesystem.
+ - If I(filesystem_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the filesystem.
+ - Options I(filesystem_name) and I(filesytem_id) are mutually exclusive parameters.
+ type: str
+ snapshot_name:
+ description:
+ - The Name of the Filesystem Snapshot.
+ - Either I(snapshot_name) or I(snapshot_id) is required for creation of the SMB share for a snapshot.
+ - If I(snapshot_name) is specified, then I(nas_server_name)/I(nas_server_id) is required to
+ uniquely identify the snapshot.
+ - Options I(snapshot_name) and I(snapshot_id) are mutually exclusive parameters.
+ type: str
+ nas_server_name:
+ description:
+ - The Name of the NAS Server.
+ - It is not required if I(share_id) is used.
+ - Options I(nas_server_name) and I(nas_server_id) are mutually exclusive parameters.
+ type: str
+ description:
+ description:
+ - Description for the SMB share.
+ - Optional parameter when creating a share.
+ - To modify, pass the new value in description field.
+ type: str
+ is_abe_enabled:
+ description:
+ - Indicates whether Access-based Enumeration (ABE) for SMB share is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_branch_cache_enabled:
+ description:
+ - Indicates whether Branch Cache optimization for SMB share is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_continuous_availability_enabled:
+ description:
+ - Indicates whether continuous availability for SMB 3.0 is enabled.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ is_encryption_enabled:
+ description:
+ - Indicates whether encryption for SMB 3.0 is enabled at the shared folder level.
+ - During creation, if not mentioned then default is C(false).
+ type: bool
+ offline_availability:
+ description:
+ - Defines valid states of Offline Availability.
+ - C(MANUAL)- Only specified files will be available offline.
+ - C(DOCUMENTS)- All files that users open will be available offline.
+ - C(PROGRAMS)- Program will preferably run from the offline cache even when
+ connected to the network. All files that users open will be available offline.
+ - C(NONE)- Prevents clients from storing documents and programs in offline cache.
+ type: str
+ choices: ["MANUAL","DOCUMENTS","PROGRAMS","NONE"]
+ umask:
+ description:
+ - The default UNIX umask for new files created on the SMB Share.
+ type: str
+ state:
+ description:
+ - Define whether the SMB share should exist or not.
+ - Value C(present) indicates that the share should exist on the system.
+ - Value C(absent) indicates that the share should not exist on the system.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+notes:
+- When ID/Name of the filesystem/snapshot is passed then I(nas_server) is not required.
+ If passed, then filesystem/snapshot should exist for the mentioned I(nas_server),
+ else the task will fail.
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Create SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ filesystem_name: "sample_fs"
+ nas_server_id: "NAS_11"
+ path: "/sample_fs"
+ description: "Sample SMB share created"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ offline_availability: "DOCUMENTS"
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+- name: Modify Attributes of SMB share for a filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_smb_share"
+ nas_server_name: "sample_nas_server"
+ description: "Sample SMB share attributes updated"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: "False"
+ is_encryption_enabled: "False"
+ umask: "022"
+ state: "present"
+- name: Create SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ nas_server_id: "NAS_11"
+ path: "/sample_snapshot"
+ description: "Sample SMB share created for snapshot"
+ is_abe_enabled: True
+ is_branch_cache_enabled: True
+ is_continuous_availability_enabled: True
+ is_encryption_enabled: True
+ umask: "777"
+ state: "present"
+- name: Modify Attributes of SMB share for a snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_name: "sample_snap_smb_share"
+ snapshot_name: "sample_snapshot"
+ description: "Sample SMB share attributes updated for snapshot"
+ is_abe_enabled: False
+ is_branch_cache_enabled: False
+ offline_availability: "MANUAL"
+ is_continuous_availability_enabled: "False"
+ is_encryption_enabled: "False"
+ umask: "022"
+ state: "present"
+- name: Get details of SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "present"
+- name: Delete SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ share_id: "{{smb_share_id}}"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+smb_share_details:
+ description: The SMB share details.
+ type: dict
+ returned: When share exists.
+ contains:
+ id:
+ description: The ID of the SMB share.
+ type: str
+ name:
+ description: Name of the SMB share.
+ type: str
+ sample: "sample_smb_share"
+ filesystem_id:
+ description: The ID of the Filesystem.
+ type: str
+ filesystem_name:
+ description: The Name of the filesystem
+ type: str
+ snapshot_id:
+ description: The ID of the Snapshot.
+ type: str
+ snapshot_name:
+ description: The Name of the Snapshot.
+ type: str
+ nas_server_id:
+ description: The ID of the nas_server.
+ type: str
+ nas_server_name:
+ description: The Name of the nas_server.
+ type: str
+ description:
+ description: Additional information about the share.
+ type: str
+ sample: This share is created for demo purpose only.
+ is_abe_enabled:
+ description: Whether Access Based enumeration is enforced or not.
+ type: bool
+ sample: false
+ is_branch_cache_enabled:
+ description: Whether branch cache is enabled or not.
+ type: bool
+ sample: false
+ is_continuous_availability_enabled:
+ description: Whether the share will be available continuously or not.
+ type: bool
+ sample: false
+ is_encryption_enabled:
+ description: Whether encryption is enabled or not.
+ type: bool
+ sample: false
+ umask:
+ description: Unix mask for the SMB share.
+ type: str
+ sample: {
+ "creation_time": "2022-03-17 11:56:54.867000+00:00",
+ "description": "",
+ "existed": true,
+ "export_paths": [
+ "\\\\multi-prot-pie.extreme1.com\\multi-prot-hui",
+ "\\\\10.230.24.26\\multi-prot-hui"
+ ],
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8748426746492
+ }
+ },
+ "filesystem_id": "fs_140",
+ "filesystem_name": "multi-prot-hui",
+ "hash": 8748426746588,
+ "id": "SMBShare_20",
+ "is_abe_enabled": false,
+ "is_ace_enabled": false,
+ "is_branch_cache_enabled": false,
+ "is_continuous_availability_enabled": false,
+ "is_dfs_enabled": false,
+ "is_encryption_enabled": false,
+ "is_read_only": null,
+ "modified_time": "2022-03-17 11:56:54.867000+00:00",
+ "name": "multi-prot-hui",
+ "nas_server_id": "nas_5",
+ "nas_server_name": "multi-prot",
+ "offline_availability": "CifsShareOfflineAvailabilityEnum.NONE",
+ "path": "/",
+ "snap": null,
+ "type": "CIFSTypeEnum.CIFS_SHARE",
+ "umask": "022"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('smbshare')
+
+application_type = "Ansible/1.6.0"
+
+
+class SMBShare(object):
+ """Class with SMB Share operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_smb_share_parameters())
+
+ # initialize the ansible module
+ mut_ex_args = [['share_name', 'share_id'],
+ ['nas_server_name', 'nas_server_id'],
+ ['filesystem_name', 'snapshot_name',
+ 'filesystem_id', 'snapshot_id'],
+ ['share_id', 'nas_server_name'],
+ ['share_id', 'nas_server_id'],
+ ['share_id', 'filesystem_name'],
+ ['share_id', 'filesystem_id'],
+ ['share_id', 'path'],
+ ['share_id', 'snapshot_name'],
+ ['share_id', 'snapshot_id']]
+ required_one_of = [['share_id', 'share_name']]
+
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mut_ex_args,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ # result is a dictionary that contains changed status and
+ # snapshot details
+ self.result = {"changed": False,
+ 'smb_share_details': {}}
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ self.smb_share_conn_obj = utils.cifs_share.UnityCifsShare(
+ self.unity_conn)
+ LOG.info('Connection established with the Unity Array')
+
+ def get_offline_availability_enum(self, offline_availability):
+ """
+ Get the enum of the Offline Availability parameter.
+ :param offline_availability: The offline_availability string
+ :return: offline_availability enum
+ """
+ if offline_availability in \
+ utils.CifsShareOfflineAvailabilityEnum.__members__:
+ return utils.CifsShareOfflineAvailabilityEnum[
+ offline_availability]
+ else:
+ error_msg = "Invalid value {0} for offline availability" \
+ " provided".format(offline_availability)
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_smb_share_obj(self, share_id=None, share_name=None,
+ filesystem_obj=None, snap_obj=None, nas_obj=None):
+ """Get SMB share details"""
+ msg = "Failed to get details of SMB Share {0} with error {1} "
+ smb_share = share_name if share_name else share_id
+ try:
+ if share_id:
+ obj_smb = self.unity_conn.get_cifs_share(_id=share_id)
+ if obj_smb and obj_smb.existed:
+ LOG.info("Successfully got the SMB share "
+ "object %s ", obj_smb)
+ return obj_smb
+
+ elif share_name is not None and filesystem_obj:
+ # There might be a case where SMB share with same name exists
+ # for different nas server. Hence, filesystem_obj is passed
+ # along with share name to get a unique resource.
+ return self.unity_conn.get_cifs_share(
+ name=share_name, filesystem=filesystem_obj)
+
+ elif share_name is not None and snap_obj:
+ # There might be a case where SMB share with same name exists
+ # for different nas server. Hence, snap_obj is passed
+ # along with share name to get a unique resource.
+ return self.unity_conn.get_cifs_share(
+ name=share_name, snap=snap_obj)
+
+ # This elif is addressing scenario where nas server details is
+ # passed and neither filesystem nor snapshot details are passed.
+ elif share_name is not None and nas_obj:
+ # Multiple smb shares can be received, as only name is passed
+ smb_share_obj = self.unity_conn.get_cifs_share(
+ name=share_name)
+
+ # Checking if instance or list of instance is returned.
+ if isinstance(smb_share_obj,
+ utils.cifs_share.UnityCifsShareList):
+ LOG.info("Multiple SMB share with same name found.")
+ smb_share_obj_list = smb_share_obj
+
+ for smb_share in smb_share_obj_list:
+ if smb_share.filesystem.nas_server == nas_obj:
+ return smb_share
+
+ msg = "No SMB share found with the given NAS Server." \
+ " Please provide correct share name and" \
+ " nas server details."
+ return None
+
+ # Below statements will execute when there is only single
+ # smb share returned.
+ if smb_share_obj.filesystem.nas_server == nas_obj:
+ return smb_share_obj
+ msg = "No SMB share found with the given NAS Server." \
+ " Please provide correct share name and" \
+ " nas server details."
+ return None
+
+ else:
+ self.module.fail_json(
+ msg="Share Name is Passed. Please enter Filesystem/"
+ "Snapshot/NAS Server Resource along with share_name"
+ " to get the details of the SMB share")
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = msg.format(smb_share, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = msg.format(smb_share, str(e))
+ LOG.error(err_msg)
+ return None
+
+ except Exception as e:
+ err_msg = msg.format(smb_share, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def create_smb_share(self, share_name, path, filesystem_obj=None,
+ snapshot_obj=None, description=None,
+ is_abe_enabled=None, is_branch_cache_enabled=None,
+ is_continuous_availability_enabled=None,
+ is_encryption_enabled=None,
+ offline_availability=None, umask=None):
+ """
+ Create SMB Share
+ :return: SMB Share Object if successful, else error.
+ """
+ if path is None or path == "":
+ self.module.fail_json(msg="Please enter a valid path."
+ " Empty string or None provided.")
+ if not filesystem_obj and not snapshot_obj:
+ self.module.fail_json(msg="Either Filesystem or Snapshot "
+ "Resource's Name/ID is required to"
+ " Create a SMB share")
+ try:
+ if filesystem_obj:
+ return self.smb_share_conn_obj.create(
+ cli=self.unity_conn._cli, name=share_name,
+ fs=filesystem_obj, path=path,
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+ else:
+ return self.smb_share_conn_obj.create_from_snap(
+ cli=self.unity_conn._cli, name=share_name,
+ snap=snapshot_obj, path=path,
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+
+ except Exception as e:
+ error_msg = "Failed to create SMB share" \
+ " %s with error %s" % (share_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_filesystem(self, filesystem_id=None, filesystem_name=None,
+ nas_server_obj=None):
+ """
+ Get the Filesystem Object.
+ :param filesystem_id: ID of the Filesystem.
+ :param filesystem_name: Name of the filesystem.
+ :param nas_server_obj: NAS Server object.
+ :return: Object of the filesystem.
+ """
+ try:
+ if filesystem_id:
+ obj_fs = self.unity_conn.get_filesystem(_id=filesystem_id)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem "
+ "object %s ", obj_fs)
+ return obj_fs
+ else:
+ return self.unity_conn.get_filesystem(
+ name=filesystem_name, nas_server=nas_server_obj)
+ return None
+ except Exception as e:
+ filesystem = filesystem_name if filesystem_name \
+ else filesystem_id
+ err_msg = "Failed to get filesystem details {0} with" \
+ " error {1}".format(filesystem, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_snapshot(self, snapshot_name, snapshot_id):
+ """
+ Get the Snapshot Object.
+ :param snapshot_id: ID of the Snapshot.
+ :param snapshot_name: Name of the Snapshot
+ :return: Object of the filesystem.
+ """
+ try:
+ obj_snap = self.unity_conn.get_snap(_id=snapshot_id,
+ name=snapshot_name)
+ if snapshot_id and obj_snap and not obj_snap.existed:
+ LOG.info("Snapshot object does not exist %s ", obj_snap)
+ return None
+ return obj_snap
+ except Exception as e:
+ snapshot = snapshot_name if snapshot_name else snapshot_id
+ err_msg = "Failed to get filesystem snapshots details {0} with" \
+ " error {1}".format(snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_nas_server(self, nas_server_name, nas_server_id):
+ """
+ Get the NAS Server Object using NAME/ID of the NAS Server.
+ :param nas_server_name: Name of the NAS Server
+ :param nas_server_id: ID of the NAS Server
+ :return: NAS Server object.
+ """
+ nas_server = nas_server_name if nas_server_name else nas_server_id
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=nas_server_id,
+ name=nas_server_name)
+ if nas_server_id and obj_nas and not obj_nas.existed:
+ LOG.info("NAS Server object does not exist %s ", obj_nas)
+ return None
+ return obj_nas
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = "Failed to get details of NAS Server" \
+ " {0} with error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ except Exception as e:
+ nas_server = nas_server_name if nas_server_name \
+ else nas_server_id
+ err_msg = "Failed to get nas server details {0} with" \
+ " error {1}".format(nas_server, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def delete_smb_share(self, smb_share_obj):
+ """
+ Delete SMB share if exists, else thrown error.
+ """
+ try:
+ smb_share_obj.delete()
+ except Exception as e:
+ error_msg = "Failed to Delete SMB share" \
+ " %s with error %s" % (smb_share_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def to_update(self, smb_share_obj):
+ LOG.info("Checking Whether the parameters are modified or not.")
+
+ offline_availability = self.module.params['offline_availability']
+ # Get the enum for the corresponding offline_availability
+ if offline_availability:
+ offline_availability = \
+ self.get_offline_availability_enum(offline_availability)
+ if offline_availability is not None and \
+ offline_availability != smb_share_obj.offline_availability:
+ return True
+
+ smb_share_dict = smb_share_obj._get_properties()
+ params_list = ['is_abe_enabled', 'is_branch_cache_enabled',
+ 'is_continuous_availability_enabled',
+ 'is_encryption_enabled', 'description', 'umask']
+ for param in params_list:
+ if self.module.params[param] is not None and \
+ self.module.params[param] != smb_share_dict[param]:
+ return True
+ return False
+
+ def update_smb_share(self, smb_share_obj, is_encryption_enabled=None,
+ is_continuous_availability_enabled=None,
+ is_abe_enabled=None,
+ is_branch_cache_enabled=None,
+ umask=None, description=None,
+ offline_availability=None):
+ """
+ The Details of the SMB share will be updated in the function.
+ """
+ try:
+ smb_share_obj.modify(
+ is_encryption_enabled=is_encryption_enabled,
+ is_con_avail_enabled=is_continuous_availability_enabled,
+ is_abe_enabled=is_abe_enabled,
+ is_branch_cache_enabled=is_branch_cache_enabled,
+ umask=umask, description=description,
+ offline_availability=offline_availability)
+
+ except Exception as e:
+ error_msg = "Failed to Update parameters of SMB share" \
+ " %s with error %s" % (smb_share_obj.name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on SMB share based on user parameters
+ chosen in playbook
+ """
+ state = self.module.params['state']
+ share_name = self.module.params['share_name']
+ filesystem_name = self.module.params['filesystem_name']
+ snapshot_name = self.module.params['snapshot_name']
+ nas_server_name = self.module.params['nas_server_name']
+ share_id = self.module.params['share_id']
+ filesystem_id = self.module.params['filesystem_id']
+ snapshot_id = self.module.params['snapshot_id']
+ nas_server_id = self.module.params['nas_server_id']
+ path = self.module.params['path']
+
+ description = self.module.params['description']
+ is_branch_cache_enabled = \
+ self.module.params['is_branch_cache_enabled']
+ is_continuous_availability_enabled = \
+ self.module.params['is_continuous_availability_enabled']
+ is_encryption_enabled = self.module.params['is_encryption_enabled']
+ is_abe_enabled = self.module.params['is_abe_enabled']
+ umask = self.module.params['umask']
+
+ offline_availability = self.module.params['offline_availability']
+ # Get the enum for the corresponding offline_availability
+ if offline_availability:
+ offline_availability = \
+ self.get_offline_availability_enum(offline_availability)
+
+ changed = False
+ '''
+ Validate parameters.
+ '''
+ if share_id is not None and \
+ (share_id == "" or len(share_id.split()) == 0):
+ self.module.fail_json(msg="Invalid share id provided."
+ " Please enter a valid share ID.")
+
+ '''
+ Get details of NAS Server, if entered.
+ '''
+ nas_server_obj = None
+ if nas_server_name or nas_server_id:
+ nas_server_obj = self.get_nas_server(nas_server_name,
+ nas_server_id)
+ if nas_server_obj:
+ msg = "NAS Server Object:" \
+ " {0}".format(nas_server_obj._get_properties())
+ LOG.info(msg)
+ else:
+ msg = "NAS Server Resource not fetched."
+ LOG.info(msg)
+
+ '''
+ Get details of Filesystem, if entered.
+ '''
+ filesystem_obj = None
+ if filesystem_id:
+ filesystem_obj = self.get_filesystem(filesystem_id)
+ if filesystem_name:
+ # nas_server_obj is required to uniquely identify filesystem
+ # resource. If neither nas_server_name nor nas_server_id
+ # is passed along with filesystem_name then error is thrown.
+ if not nas_server_obj:
+ self.module.fail_json(msg="nas_server_id/nas_server_name is "
+ "required when filesystem_name is "
+ "passed")
+ filesystem_obj = self.get_filesystem(
+ None, filesystem_name, nas_server_obj)
+ if filesystem_obj:
+ msg = "Filesystem Object:" \
+ " {0}".format(filesystem_obj._get_properties())
+ LOG.info(msg)
+ # Checking if filesystem supports SMB protocol or not.
+ if filesystem_obj and \
+ filesystem_obj.supported_protocols.name == "NFS":
+ self.module.fail_json(msg="Cannot perform SMB share operations "
+ "as file system supports only NFS "
+ "protocol. Please enter a valid "
+ "Filesystem having supported protocol"
+ " as SMB or Multiprotocol.")
+ '''
+ Get details of Snapshot, if entered.
+ '''
+ snapshot_obj = None
+ if snapshot_id or snapshot_name:
+            # Snapshot name and snapshot ID are both unique across the
+            # array, so NAS server details are not needed here.
+ snapshot_obj = self.get_snapshot(snapshot_name, snapshot_id)
+ if snapshot_obj:
+ msg = "Snapshot Object:" \
+ " {0}".format(snapshot_obj._get_properties())
+ LOG.info(msg)
+ else:
+ msg = "Snapshot Resource not fetched."
+ LOG.info(msg)
+
+ '''
+ Get the Details of the SMB Share
+ '''
+ smb_share_obj = self.get_smb_share_obj(
+ share_id, share_name, filesystem_obj, snapshot_obj,
+ nas_server_obj)
+ if smb_share_obj:
+ msg = "SMB Share Object:" \
+ " {0}".format(smb_share_obj._get_properties())
+ LOG.info(msg)
+ elif state == 'present' and share_id:
+ msg = "Unable to fetch SMB Share Resource. " \
+ "Incorrect SMB share id provided. " \
+ "Please enter a correct share id."
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ '''
+ Creation of SMB Share
+ '''
+ if state == "present" and not smb_share_obj:
+ smb_share_obj = self.create_smb_share(
+ share_name, path, filesystem_obj, snapshot_obj, description,
+ is_abe_enabled, is_branch_cache_enabled,
+ is_continuous_availability_enabled, is_encryption_enabled,
+ offline_availability, umask)
+ changed = True
+
+ '''
+ Update the SMB share details
+ '''
+ if state == "present" and smb_share_obj:
+ LOG.info("Modify the details of the SMB share.")
+ update_flag = self.to_update(smb_share_obj)
+ msg = "Update Flag: {0}".format(str(update_flag))
+ LOG.info(msg)
+ if update_flag:
+ self.update_smb_share(smb_share_obj, is_encryption_enabled,
+ is_continuous_availability_enabled,
+ is_abe_enabled,
+ is_branch_cache_enabled,
+ umask, description,
+ offline_availability)
+ changed = True
+
+ '''
+ Delete the SMB share details
+ '''
+ if state == "absent" and smb_share_obj:
+ self.delete_smb_share(smb_share_obj)
+ changed = True
+
+ '''
+ Update the changed state and SMB share details
+ '''
+
+ self.result["changed"] = changed
+ smb_details = self.prepare_output_dict(state, share_id, share_name,
+ filesystem_obj, snapshot_obj,
+ nas_server_obj)
+ self.result["smb_share_details"] = smb_details
+ self.module.exit_json(**self.result)
+
+ def prepare_output_dict(self, state, share_id, share_name,
+ filesystem_obj, snapshot_obj, nas_server_obj):
+ smb_share_details = None
+ smb_share_obj = None
+ if state == 'present':
+ smb_share_obj = self.get_smb_share_obj(
+ share_id, share_name, filesystem_obj,
+ snapshot_obj, nas_server_obj)
+ smb_share_details = smb_share_obj._get_properties()
+ if smb_share_details:
+ # Get Snapshot NAME and ID if SMB share exists for Snapshot
+ if smb_share_obj.type.name == "CIFS_SNAPSHOT":
+ smb_share_details['snapshot_name'] = smb_share_obj.snap.name
+ smb_share_details['snapshot_id'] = smb_share_obj.snap.id
+
+ # Get Filesystem NAME and ID
+ smb_share_details['filesystem_name'] = \
+ smb_share_obj.filesystem.name
+ smb_share_details['filesystem_id'] = smb_share_obj.filesystem.id
+
+ # Get NAS server NAME and ID
+ smb_share_details['nas_server_name'] = \
+ smb_share_obj.filesystem.nas_server.name
+ smb_share_details['nas_server_id'] = \
+ smb_share_obj.filesystem.nas_server.id
+ return smb_share_details
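+
+    # Note: the returned details are the raw share properties augmented with
+    # filesystem and NAS server name/id keys, plus snapshot name/id when the
+    # share is snapshot-based, all resolved above.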
+
+
+def get_smb_share_parameters():
+ """
+    This method provides the parameters required for the Ansible SMB share
+    module on Unity.
+ """
+
+ return dict(
+ share_name=dict(), share_id=dict(),
+ filesystem_name=dict(), filesystem_id=dict(),
+ snapshot_name=dict(), snapshot_id=dict(),
+ nas_server_name=dict(), nas_server_id=dict(),
+ path=dict(no_log=True), umask=dict(), description=dict(),
+ offline_availability=dict(
+ choices=["MANUAL", "DOCUMENTS", "PROGRAMS", "NONE"]),
+ is_abe_enabled=dict(type='bool'),
+ is_branch_cache_enabled=dict(type='bool'),
+ is_continuous_availability_enabled=dict(type='bool'),
+ is_encryption_enabled=dict(type='bool'),
+ state=dict(required=True, choices=['present', 'absent'], type='str')
+ )
+
+
+def main():
+ """ Create Unity SMB share object and perform action on it
+ based on user input from playbook"""
+ obj = SMBShare()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
new file mode 100644
index 000000000..c8aba1846
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
@@ -0,0 +1,751 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Snapshots on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snapshot
+short_description: Manage snapshots on the Unity storage system
+description:
+- Managing snapshots on the Unity storage system includes creating a snapshot,
+  deleting a snapshot, updating a snapshot, getting snapshot details, and
+  mapping or unmapping a snapshot to or from a host.
+version_added: '1.1.0'
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- P Srinivas Rao (@srinivas-rao5) <ansible.team@dell.com>
+options:
+ snapshot_name:
+ description:
+ - The name of the snapshot.
+ - Mandatory parameter for creating a snapshot.
+ - For all other operations either I(snapshot_name) or I(snapshot_id) is
+ required.
+ type: str
+ vol_name:
+ description:
+ - The name of the volume for which snapshot is created.
+ - For creation of a snapshot either I(vol_name) or I(cg_name) is required.
+ - Not required for other operations.
+ type: str
+ cg_name:
+ description:
+ - The name of the Consistency Group for which snapshot is created.
+ - For creation of a snapshot either I(vol_name) or I(cg_name) is required.
+ - Not required for other operations.
+ type: str
+ snapshot_id:
+ description:
+ - The id of the snapshot.
+ - For all operations other than creation either I(snapshot_name) or
+ I(snapshot_id) is required.
+ type: str
+ auto_delete:
+ description:
+ - This option specifies whether the snapshot is auto deleted or not.
+ - If set to C(true), snapshot will expire based on the pool auto deletion
+ policy.
+    - If set to C(false), snapshot will not be auto deleted
+      based on the pool auto deletion policy.
+    - Option I(auto_delete) cannot be set to C(true) if I(expiry_time) is specified.
+ - If during creation neither I(auto_delete) nor I(expiry_time) is mentioned
+ then snapshot will be created keeping I(auto_delete) as C(true).
+ - Once the I(expiry_time) is set then snapshot cannot be assigned
+ to the auto delete policy.
+ type: bool
+ expiry_time:
+ description:
+ - This option is for specifying the date and time after which the
+ snapshot will expire.
+ - The time is to be mentioned in UTC timezone.
+ - The format is "MM/DD/YYYY HH:MM". Year must be in 4 digits.
+ type: str
+ description:
+ description:
+ - The additional information about the snapshot can be provided using
+ this option.
+ type: str
+ new_snapshot_name:
+ description:
+ - New name for the snapshot.
+ type: str
+ state:
+ description:
+    - The I(state) option specifies whether the snapshot should
+      exist or not.
+ type: str
+ required: true
+ choices: [ 'absent', 'present' ]
+ host_name:
+ description:
+ - The name of the host.
+    - Either I(host_name) or I(host_id) is required to map a snapshot to, or
+      unmap it from, a host.
+ - Snapshot can be attached to multiple hosts.
+ type: str
+ host_id:
+ description:
+ - The id of the host.
+    - Either I(host_name) or I(host_id) is required to map a snapshot to, or
+      unmap it from, a host.
+ - Snapshot can be attached to multiple hosts.
+ type: str
+ host_state:
+ description:
+    - The I(host_state) option specifies whether the host is to be mapped to
+      or unmapped from the snapshot.
+    - It is required when a snapshot is mapped to or unmapped from a host.
+ type: str
+ choices: ['mapped', 'unmapped']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Create a Snapshot for a CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ cg_name: "{{cg_name}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ description: "{{description}}"
+ auto_delete: False
+ state: "present"
+
+ - name: Create a Snapshot for a volume with Host attached
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ vol_name: "{{vol_name}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ description: "{{description}}"
+ expiry_time: "04/15/2025 16:30"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Unmap a host for a Snapshot
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Map snapshot to a host
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ host_name: "{{host_name}}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Update attributes of a Snapshot for a volume
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{vol_snapshot_name}}"
+ new_snapshot_name: "{{new_snapshot_name}}"
+ description: "{{new_description}}"
+ host_name: "{{host_name}}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Delete Snapshot of CG
+ dellemc.unity.snapshot:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_name: "{{cg_snapshot_name}}"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+snapshot_details:
+ description: Details of the snapshot.
+ returned: When snapshot exists
+ type: dict
+ contains:
+ is_auto_delete:
+            description: Whether the snapshot will be automatically deleted
+                         based on the pool auto-deletion policy.
+            type: bool
+ expiration_time:
+ description: Date and time after which the snapshot
+ will expire.
+ type: str
+ hosts_list:
+ description: Contains the name and id of the associated
+ hosts.
+            type: list
+ id:
+ description: Unique identifier of the snapshot instance.
+ type: str
+ name:
+ description: The name of the snapshot.
+ type: str
+ storage_resource_name:
+ description: Name of the storage resource for which the
+ snapshot exists.
+ type: str
+ storage_resource_id:
+ description: Id of the storage resource for which the snapshot
+ exists.
+ type: str
+ sample: {
+ "access_type": null,
+ "attached_wwn": null,
+ "creation_time": "2022-10-21 08:20:25.803000+00:00",
+ "creator_schedule": null,
+ "creator_type": "SnapCreatorTypeEnum.USER_CUSTOM",
+ "creator_user": {
+ "id": "user_admin"
+ },
+ "description": "Test snap creation",
+ "existed": true,
+ "expiration_time": null,
+ "hash": 8756689457056,
+ "hosts_list": [],
+ "id": "85899355291",
+ "io_limit_policy": null,
+ "is_auto_delete": true,
+ "is_modifiable": false,
+ "is_modified": false,
+ "is_read_only": true,
+ "is_system_snap": false,
+ "last_writable_time": null,
+ "lun": null,
+ "name": "ansible_snap_cg_1_1",
+ "parent_snap": null,
+ "size": null,
+ "snap_group": null,
+ "state": "SnapStateEnum.READY",
+ "storage_resource_id": "res_95",
+ "storage_resource_name": "CG_ansible_test_2_new"
+ }
+'''
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+from datetime import datetime
+
+LOG = utils.get_logger('snapshot')
+
+application_type = "Ansible/1.6.0"
+
+
+class Snapshot(object):
+ """Class with Snapshot operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_snapshot_parameters())
+
+ mutually_exclusive = [['snapshot_name', 'snapshot_id'],
+ ['vol_name', 'cg_name'],
+ ['host_name', 'host_id']]
+
+ required_one_of = [['snapshot_name', 'snapshot_id']]
+ # initialize the ansible module
+ self.module = AnsibleModule(argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ # result is a dictionary that contains changed status and
+ # snapshot details
+ self.result = {"changed": False,
+ 'snapshot_details': {}}
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ self.snap_obj = utils.snap.UnitySnap(self.unity_conn)
+ LOG.info('Connection established with the Unity Array')
+
+ def validate_expiry_time(self, expiry_time):
+ """Validates the specified expiry_time"""
+ try:
+ datetime.strptime(expiry_time, '%m/%d/%Y %H:%M')
+ except ValueError:
+ error_msg = "expiry_time not in MM/DD/YYYY HH:MM format"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
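+
+    # Format sketch (illustrative values only): the accepted format is
+    # strictly '%m/%d/%Y %H:%M', e.g.
+    #   >>> from datetime import datetime
+    #   >>> datetime.strptime("04/15/2025 16:30", '%m/%d/%Y %H:%M')
+    #   datetime.datetime(2025, 4, 15, 16, 30)
+    # whereas ISO-style input such as "2025-04-15 16:30" raises ValueError
+    # and fails the module.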
+
+ def to_update(self, snapshot, new_name=None, description=None,
+ auto_del=None, expiry_time=None, host=None,
+ host_state=None):
+ """Determines whether to update the snapshot or not"""
+        # If the snapshot has is_auto_delete set to True,
+        # check whether auto_delete in the input is either None or True.
+ if expiry_time and snapshot.is_auto_delete and \
+ (auto_del is None or auto_del):
+ self.module.fail_json(msg="expiry_time can be assigned "
+ "when auto delete is False")
+ if auto_del and snapshot.expiration_time:
+ error_msg = "expiry_time for snapshot is set." \
+ " Once it is set then snapshot cannot" \
+ " be assigned to auto_delete policy"
+ self.module.fail_json(msg=error_msg)
+ if new_name and new_name != snapshot.name:
+ return True
+ if description and description != snapshot.description:
+ return True
+ if auto_del and auto_del != snapshot.is_auto_delete:
+ return True
+ if to_update_expiry_time(snapshot, expiry_time):
+ return True
+ if host and to_update_host_list(snapshot, host, host_state):
+ return True
+ return False
+
+ def update_snapshot(self, snapshot, new_name=None,
+ description=None, auto_del=None, expiry_time=None,
+ host_access_list=None):
+ try:
+ duration = None
+ if expiry_time:
+ duration = convert_timestamp_to_sec(
+ expiry_time, self.unity_conn.system_time)
+ if duration and duration <= 0:
+ self.module.fail_json(msg="expiry_time should be after"
+ " the current system time")
+ snapshot.modify(name=new_name, retentionDuration=duration,
+ isAutoDelete=auto_del, description=description,
+ hostAccess=host_access_list)
+ snapshot.update()
+ except Exception as e:
+ error_msg = "Failed to modify snapshot" \
+ " [name: %s , id: %s] with error %s"\
+ % (snapshot.name, snapshot.id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def create_snapshot(self, snap_name, storage_id, description=None,
+ auto_del=None, expiry_time=None):
+ try:
+ duration = None
+ if expiry_time:
+ duration = convert_timestamp_to_sec(
+ expiry_time, self.unity_conn.system_time)
+ if duration <= 0:
+ self.module.fail_json(msg="expiry_time should be after"
+ " the current system time")
+ snapshot = self.snap_obj.create(
+ cli=self.unity_conn._cli, storage_resource=storage_id,
+ name=snap_name, description=description,
+ is_auto_delete=auto_del, retention_duration=duration)
+ return snapshot
+ except Exception as e:
+ error_msg = "Failed to create snapshot" \
+ " %s with error %s" % (snap_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def delete_snapshot(self, snapshot):
+ try:
+ if not bool(get_hosts_dict(snapshot)):
+ snapshot.detach_from(None)
+ snapshot.delete()
+ else:
+ snapshot.delete()
+ return None
+
+ except Exception as e:
+ error_msg = "Failed to delete snapshot" \
+ " [name: %s, id: %s] with error %s" \
+ % (snapshot.name, snapshot.id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_snapshot_obj(self, name=None, id=None):
+ snapshot = id if id else name
+ msg = "Failed to get details of snapshot %s with error %s "
+ try:
+ return self.unity_conn.get_snap(name=name, _id=id)
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ self.module.fail_json(msg=cred_err)
+ else:
+ err_msg = msg % (snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ err_msg = msg % (snapshot, str(e))
+ LOG.error(err_msg)
+ return None
+
+ except Exception as e:
+ err_msg = msg % (snapshot, str(e))
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+ def get_volume_obj(self, name):
+ try:
+ return self.unity_conn.get_lun(name=name)
+ except Exception as e:
+ error_msg = "Failed to get volume %s with error %s"\
+ % (name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_cg_obj(self, name):
+ try:
+ return self.unity_conn.get_cg(name=name)
+ except Exception as e:
+ error_msg = "Failed to get cg %s with error %s" % (name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_host_obj(self, name=None, id=None):
+ """ Get the Host object"""
+ try:
+ return self.unity_conn.get_host(name=name, _id=id)
+ except Exception as e:
+ host = id if id else name
+ error_msg = "Failed to get host %s with error %s"\
+ % (host, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def attach_to_snap(self, snapshot, host):
+ """ Attach snapshot to a host """
+ try:
+ if not get_hosts_dict(snapshot):
+ snapshot.detach_from(None)
+ snapshot.attach_to(host)
+ snapshot.update()
+ except Exception as e:
+ error_msg = "Failed to attach snapshot [name: %s, id: %s]" \
+ " to host [%s, %s] with error %s"\
+ % (snapshot.name, snapshot.id,
+ host.name, host.id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on snapshot module based on parameters
+ chosen in playbook
+ """
+ snapshot_name = self.module.params['snapshot_name']
+ snapshot_id = self.module.params['snapshot_id']
+ vol_name = self.module.params['vol_name']
+ cg_name = self.module.params['cg_name']
+ auto_delete = self.module.params['auto_delete']
+ expiry_time = self.module.params['expiry_time']
+ description = self.module.params['description']
+ new_snapshot_name = self.module.params['new_snapshot_name']
+ host_name = self.module.params['host_name']
+ host_id = self.module.params['host_id']
+ host_state = self.module.params['host_state']
+ state = self.module.params['state']
+ host = None
+ storage_resource = None
+ changed = False
+
+ LOG.info("Getting Snapshot details")
+ snapshot = self.get_snapshot_obj(name=snapshot_name, id=snapshot_id)
+
+ if snapshot and not snapshot.existed:
+ snapshot = None
+ msg = "snapshot details: %s" % str(snapshot)
+ LOG.info(msg)
+
+ # Get Volume Object
+ if vol_name is not None:
+ if vol_name == "" or vol_name.isspace():
+ self.module.fail_json(msg="Invalid vol_name given, Please"
+ " provide a valid vol_name")
+ storage_resource = self.get_volume_obj(name=vol_name)
+
+ # Get Consistency Group Object
+ if cg_name is not None:
+ if cg_name == "" or cg_name.isspace():
+ self.module.fail_json(msg="Invalid cg_name given, Please"
+ " provide a valid cg_name")
+ storage_resource = self.get_cg_obj(name=cg_name)
+
+ # Get host object for volume snapshots
+ if host_id or host_name:
+ if cg_name:
+ self.module.fail_json(msg="Mapping CG snapshot to host"
+ " is not supported.")
+ host = self.get_host_obj(name=host_name, id=host_id)
+
+ # Check whether host_name or host_id is given in input
+ # along with host_state
+ if (host and not host_state) or (not host and host_state):
+ self.module.fail_json(
+ msg="Either host_name or host_id along with host_state "
+ "is required to map or unmap a snapshot from a host")
+
+ # Check for error, if user tries to create a snapshot with the
+ # same name for other storage resource.
+ if snapshot and storage_resource and\
+ (snapshot.storage_resource.id != storage_resource.id):
+ self.module.fail_json(
+ msg="Snapshot %s is of %s storage resource. Cannot create new"
+ " snapshot with same name for %s storage resource"
+ % (snapshot.name, snapshot.storage_resource.name,
+ storage_resource.name))
+
+ # check for valid expiry_time
+ if expiry_time is not None and \
+ (expiry_time == "" or expiry_time.isspace()):
+ self.module.fail_json(msg="Please provide valid expiry_time,"
+ " empty expiry_time given")
+ # Check if in input auto_delete is True and expiry_time is not None
+ if expiry_time and auto_delete:
+ error_msg = "Cannot set expiry_time if auto_delete given as True"
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ # Check whether to modify the snapshot or not
+ update_flag = False
+ if snapshot:
+ update_flag = self.to_update(snapshot,
+ new_name=new_snapshot_name,
+ description=description,
+ auto_del=auto_delete,
+ expiry_time=expiry_time,
+ host=host, host_state=host_state)
+ msg = "update_flag for snapshot %s" % str(update_flag)
+ LOG.info(msg)
+
+ # Create a Snapshot
+ if not snapshot and state == "present":
+ LOG.info("Creating a snapshot")
+ if snapshot_id:
+ self.module.fail_json(msg="Creation of Snapshot is allowed"
+ " using snapshot_name only, "
+ "snapshot_id given")
+ if snapshot_name == "" or snapshot_name.isspace():
+ self.module.fail_json(msg="snapshot_name is required for"
+ " creation of a snapshot,"
+ " empty snapshot_name given")
+ if not storage_resource:
+ self.module.fail_json(msg="vol_name or cg_name required to"
+ " create a snapshot")
+
+ if new_snapshot_name:
+ self.module.fail_json(
+ msg="new_snapshot_name can not be assigned"
+ " during creation of a snapshot")
+
+ snapshot = self.create_snapshot(snapshot_name,
+ storage_resource.id,
+ description, auto_delete,
+ expiry_time)
+ if host and host_state == "mapped":
+ self.attach_to_snap(snapshot, host)
+ changed = True
+
+ # Update the Snapshot
+ if snapshot and state == "present" and update_flag:
+
+ LOG.info("Updating the Snapshot details")
+
+ if host_state == 'mapped':
+ self.attach_to_snap(snapshot, host)
+ self.update_snapshot(
+ snapshot, new_name=new_snapshot_name,
+ description=description, auto_del=auto_delete,
+ expiry_time=expiry_time)
+
+ elif host_state == 'unmapped':
+ host_access_list = create_host_access_list(snapshot,
+ host,
+ host_state)
+ self.update_snapshot(
+ snapshot, new_name=new_snapshot_name,
+ description=description, auto_del=auto_delete,
+ expiry_time=expiry_time,
+ host_access_list=host_access_list)
+
+ else:
+ self.update_snapshot(
+ snapshot, new_name=new_snapshot_name,
+ description=description, auto_del=auto_delete,
+ expiry_time=expiry_time)
+ changed = True
+
+ # Delete the Snapshot
+ if state == "absent" and snapshot:
+ snapshot = self.delete_snapshot(snapshot)
+ changed = True
+
+ # Add snapshot details to the result.
+ if snapshot:
+ snapshot.update()
+ self.result["snapshot_details"] = \
+ create_snapshot_details_dict(snapshot)
+ else:
+ self.result["snapshot_details"] = {}
+
+ self.result["changed"] = changed
+ self.module.exit_json(**self.result)
+
+
+def create_snapshot_details_dict(snapshot):
+ """ Add name and id of storage resource and hosts to snapshot details """
+ snapshot_dict = snapshot._get_properties()
+ del snapshot_dict['storage_resource']
+ del snapshot_dict['host_access']
+ snapshot_dict['hosts_list'] = get_hosts_list(
+ get_hosts_dict(snapshot))
+ snapshot_dict['storage_resource_name'] = \
+ snapshot.storage_resource.name
+ snapshot_dict['storage_resource_id'] = \
+ snapshot.storage_resource.id
+ return snapshot_dict
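+
+# Note: the returned dict is the raw snapshot properties plus flattened
+# 'hosts_list', 'storage_resource_name' and 'storage_resource_id' keys;
+# the nested 'storage_resource' and 'host_access' objects are removed above.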
+
+
+def get_hosts_list(hosts_dict):
+ """ Get the host name and host id of all the associated hosts """
+ hosts_list = []
+ if not hosts_dict:
+ return hosts_list
+
+ for host in list(hosts_dict.keys()):
+ hosts_list.append(
+ {
+ "host_name": host.name,
+ "host_id": host.id
+ }
+ )
+ return hosts_list
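+
+# Example return value (illustrative names and ids only):
+#   [{"host_name": "host_a", "host_id": "Host_12"}]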
+
+
+def create_host_access_list(snapshot, host, host_state):
+ """ This method creates a List of dictionaries which will be used
+ to modify the list of hosts mapped to a snapshot """
+ host_access_list = []
+ hosts_dict = get_hosts_dict(snapshot)
+ # If snapshot is not attached to any host.
+ if not hosts_dict:
+ return None
+ if to_update_host_list(snapshot, host, host_state):
+ if host_state == "mapped":
+ return None
+ for snap_host in list(hosts_dict.keys()):
+ if snap_host != host:
+ access_dict = {'host': snap_host,
+ 'allowedAccess': hosts_dict[snap_host]}
+ host_access_list.append(access_dict)
+ return host_access_list
+
+
+def get_hosts_dict(snapshot):
+ """ This method creates a dictionary, with host as key and
+ allowed access as value """
+ hosts_dict = {}
+ LOG.info("Inside get_hosts_dict")
+ if not snapshot.host_access:
+ return hosts_dict
+ for host_access_obj in snapshot.host_access:
+ hosts_dict[host_access_obj.host] = \
+ host_access_obj.allowed_access
+ return hosts_dict
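+
+# Resulting shape sketch (hypothetical objects): {<host object>: <allowed
+# access enum>, ...}; an empty dict means the snapshot has no attached host.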
+
+
+def to_update_host_list(snapshot, host, host_state):
+ """ Determines whether to update hosts list or not"""
+ hosts_dict = get_hosts_dict(snapshot)
+ if (not hosts_dict or host not in list(hosts_dict.keys()))\
+ and host_state == "mapped":
+ return True
+ if (hosts_dict and host in list(hosts_dict.keys())) \
+ and host_state == "unmapped":
+ return True
+ return False
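+
+# Decision sketch: the host list is updated only when the requested
+# host_state disagrees with current membership, e.g. host not attached and
+# "mapped" -> True; host attached and "unmapped" -> True; otherwise False.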
+
+
+def to_update_expiry_time(snapshot, expiry_time=None):
+ """ Check whether to update expiry_time or not"""
+ if not expiry_time:
+ return False
+ if snapshot.expiration_time is None:
+ return True
+ if convert_timestamp_to_sec(expiry_time, snapshot.expiration_time) != 0:
+ return True
+ return False
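+
+# Note: equality is checked at minute granularity, since
+# convert_timestamp_to_sec below parses both times with '%m/%d/%Y %H:%M'.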
+
+
+def convert_timestamp_to_sec(expiry_time, snap_time):
+ """Converts the time difference to seconds"""
+ snap_time_str = snap_time.strftime('%m/%d/%Y %H:%M')
+ snap_timestamp = datetime.strptime(snap_time_str, '%m/%d/%Y %H:%M')
+ expiry_timestamp = datetime.strptime(expiry_time, "%m/%d/%Y %H:%M")
+ return int((expiry_timestamp - snap_timestamp).total_seconds())
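+
+# Worked example (hypothetical timestamps): with a reference time of
+# 04/15/2025 15:30, an expiry one hour later yields 3600 seconds.
+#   >>> from datetime import datetime
+#   >>> ref = datetime.strptime("04/15/2025 15:30", "%m/%d/%Y %H:%M")
+#   >>> convert_timestamp_to_sec("04/15/2025 16:30", ref)
+#   3600
+# A zero or negative result means the expiry is not after the reference.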
+
+
+def get_snapshot_parameters():
+ """This method provide parameter required for the ansible snapshot
+ module on Unity"""
+ return dict(
+ snapshot_name=dict(required=False, type='str'),
+ snapshot_id=dict(required=False, type='str'),
+ vol_name=dict(required=False, type='str'),
+ cg_name=dict(required=False, type='str'),
+ auto_delete=dict(required=False, type='bool'),
+ expiry_time=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ new_snapshot_name=dict(required=False, type='str'),
+ host_name=dict(required=False, type='str'),
+ host_id=dict(required=False, type='str'),
+ host_state=dict(required=False, type='str',
+ choices=['mapped', 'unmapped']),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity Snapshot object and perform actions on it
+ based on user input from playbook"""
+ obj = Snapshot()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
new file mode 100644
index 000000000..aba5524cd
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
@@ -0,0 +1,1002 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing snapshot schedules on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: snapshotschedule
+version_added: '1.1.0'
+short_description: Manage snapshot schedules on the Unity storage system
+description:
+- Managing snapshot schedules on the Unity storage system includes
+ creating new snapshot schedule, getting details of snapshot schedule,
+ modifying attributes of snapshot schedule, and deleting snapshot schedule.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Akash Shendge (@shenda1) <ansible.team@dell.com>
+
+options:
+ name:
+ description:
+ - The name of the snapshot schedule.
+ - Name is mandatory for a create operation.
+ - Specify either I(name) or I(id) (but not both) for any operation.
+ type: str
+ id:
+ description:
+ - The ID of the snapshot schedule.
+ type: str
+ type:
+ description:
+ - Type of the rule to be included in snapshot schedule.
+ - Type is mandatory for any create or modify operation.
+    - Once the snapshot schedule is created with one type, only parameters
+      within that type can be modified; the rule type itself cannot change.
+ type: str
+ choices: ['every_n_hours', 'every_day', 'every_n_days', 'every_week',
+ 'every_month']
+ interval:
+ description:
+ - Number of hours between snapshots.
+ - Applicable only when rule type is C(every_n_hours).
+ type: int
+ hours_of_day:
+ description:
+ - Hours of the day when the snapshot will be taken.
+ - Applicable only when rule type is C(every_day).
+ type: list
+ elements: int
+ day_interval:
+ description:
+ - Number of days between snapshots.
+ - Applicable only when rule type is C(every_n_days).
+ type: int
+ days_of_week:
+ description:
+ - Days of the week for which the snapshot schedule rule applies.
+ - Applicable only when rule type is C(every_week).
+ type: list
+ elements: str
+ choices: ['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY',
+ 'FRIDAY', 'SATURDAY']
+ day_of_month:
+ description:
+ - Day of the month for which the snapshot schedule rule applies.
+ - Applicable only when rule type is C(every_month).
+    - Value should be in the range [1, 31].
+ type: int
+ hour:
+ description:
+ - The hour when the snapshot will be taken.
+ - Applicable for C(every_n_days), C(every_week), C(every_month) rule types.
+    - For a create operation, if the I(hour) parameter is not specified, the
+      value will be taken as C(0).
+    - Value should be in the range [0, 23].
+ type: int
+ minute:
+ description:
+ - Minute offset from the hour when the snapshot will be taken.
+ - Applicable for all rule types.
+    - For a create operation, if the I(minute) parameter is not specified,
+      the value will be taken as C(0).
+    - Value should be in the range [0, 59].
+ type: int
+ desired_retention:
+ description:
+ - The number of days/hours for which snapshot will be retained.
+ - When I(auto_delete) is C(true), I(desired_retention) cannot be specified.
+ - Maximum desired retention supported is 31 days or 744 hours.
+ type: int
+ retention_unit:
+ description:
+ - The retention unit for the snapshot.
+ default: 'hours'
+ type: str
+ choices: ['hours' , 'days']
+ auto_delete:
+ description:
+ - Indicates whether the system can automatically delete the snapshot.
+ type: bool
+ state:
+ description:
+ - Define whether the snapshot schedule should exist or not.
+ type: str
+ required: true
+ choices: [absent, present]
+notes:
+- A snapshot schedule created through Ansible will have only one rule.
+- Modification of rule type is not allowed. Within the same type, other
+  parameters can be modified.
+- If an existing snapshot schedule has more than 1 rule in it, only get and
+  delete operations are allowed.
+- The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create snapshot schedule (Rule Type - every_n_hours)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ type: "every_n_hours"
+ interval: 6
+ desired_retention: 24
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_day)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ hours_of_day:
+ - 8
+ - 14
+ auto_delete: True
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_n_days)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Day_Testing"
+ type: "every_n_days"
+ day_interval: 2
+ desired_retention: 16
+ retention_unit: "days"
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_week)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Week_Testing"
+ type: "every_week"
+ days_of_week:
+ - MONDAY
+ - FRIDAY
+ hour: 12
+ minute: 30
+ desired_retention: 200
+ state: "{{state_present}}"
+
+- name: Create snapshot schedule (Rule Type - every_month)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Month_Testing"
+ type: "every_month"
+ day_of_month: 17
+ auto_delete: True
+ state: "{{state_present}}"
+
+- name: Get snapshot schedule details using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_N_Hours_Testing"
+ state: "{{state_present}}"
+
+- name: Get snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_present}}"
+
+- name: Modify snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ type: "every_n_hours"
+ interval: 8
+ state: "{{state_present}}"
+
+- name: Modify snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ desired_retention: 200
+ auto_delete: False
+ state: "{{state_present}}"
+
+- name: Delete snapshot schedule using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ id: "{{id}}"
+ state: "{{state_absent}}"
+
+- name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{unispherehost}}"
+ validate_certs: "{{validate_certs}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "Ansible_Every_Day_Testing"
+ state: "{{state_absent}}"
+"""
+
+RETURN = r"""
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+snapshot_schedule_details:
+ description: Details of the snapshot schedule.
+ returned: When snapshot schedule exists
+ type: dict
+ contains:
+ id:
+ description: The system ID given to the snapshot schedule.
+ type: str
+ name:
+ description: The name of the snapshot schedule.
+ type: str
+ luns:
+            description: Details of volumes to which the snapshot schedule
+                is applied.
+ type: dict
+ contains:
+ UnityLunList:
+                    description: List of volumes to which the snapshot
+                        schedule is applied.
+ type: list
+ contains:
+ UnityLun:
+ description: Detail of volume.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to volume.
+ type: str
+ rules:
+ description: Details of rules that apply to snapshot schedule.
+ type: list
+ contains:
+ id:
+ description: The system ID of the rule.
+ type: str
+ interval:
+ description: Number of days or hours between snaps,
+ depending on the rule type.
+ type: int
+ hours:
+ description: Hourly frequency for the snapshot
+ schedule rule.
+ type: list
+ minute:
+ description: Minute frequency for the snapshot
+ schedule rule.
+ type: int
+ days_of_week:
+ description: Days of the week for which the snapshot
+ schedule rule applies.
+ type: dict
+ contains:
+ DayOfWeekEnumList:
+ description: Enumeration of days of the week.
+ type: list
+ days_of_month:
+ description: Days of the month for which the snapshot
+ schedule rule applies.
+ type: list
+ retention_time:
+ description: Period of time in seconds for which to keep
+ the snapshot.
+ type: int
+ retention_time_in_hours:
+ description: Period of time in hours for which to keep the
+ snapshot.
+ type: int
+ rule_type:
+ description: Type of the rule applied to snapshot schedule.
+ type: str
+ is_auto_delete:
+ description: Indicates whether the system can automatically
+ delete the snapshot based on pool automatic-deletion
+ thresholds.
+ type: bool
+ storage_resources:
+            description: Details of storage resources to which the snapshot
+                schedule is applied.
+ type: dict
+ contains:
+ UnityStorageResourceList:
+                    description: List of storage resources to which the
+                        snapshot schedule is applied.
+ type: list
+ contains:
+ UnityStorageResource:
+ description: Detail of storage resource.
+ type: dict
+ contains:
+ id:
+ description: The system ID given to storage
+ resource.
+ type: str
+ sample: {
+ "existed": true,
+ "hash": 8742032390151,
+ "id": "snapSch_63",
+ "is_default": false,
+ "is_modified": null,
+ "is_sync_replicated": false,
+ "luns": null,
+ "modification_time": "2021-12-14 21:37:47.905000+00:00",
+ "name": "SS7_empty_hour_SS",
+ "rules": [
+ {
+ "access_type": "FilesystemSnapAccessTypeEnum.CHECKPOINT",
+ "days_of_month": null,
+ "days_of_week": {
+ "DayOfWeekEnumList": []
+ },
+ "existed": true,
+ "hash": 8742032280772,
+ "hours": [
+ 0
+ ],
+ "id": "SchedRule_109",
+ "interval": 2,
+ "is_auto_delete": false,
+ "minute": 0,
+ "retention_time": 86400,
+ "retention_time_in_hours": 24,
+ "rule_type": "every_n_days",
+ "type": "ScheduleTypeEnum.N_DAYS_AT_HHMM"
+ }
+ ],
+ "storage_resources": null,
+ "version": "ScheduleVersionEnum.LEGACY"
+ }
+"""
+
+import logging
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('snapshotschedule')
+
+application_type = "Ansible/1.6.0"
+
+
+class SnapshotSchedule(object):
+ """Class with snapshot schedule operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_snapshotschedule_parameters())
+
+ mutually_exclusive = [['name', 'id'], ['interval', 'hour'],
+ ['hours_of_day', 'hour'],
+ ['interval', 'hours_of_day', 'day_interval',
+ 'days_of_week', 'day_of_month']]
+ required_one_of = [['name', 'id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of
+ )
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def schedule_modify_required(self, schedule_details):
+ """Check if the desired snapshot schedule state is different from
+ existing snapshot schedule state
+ :param schedule_details: The dict containing snapshot schedule
+ details
+ :return: Boolean value to indicate if modification is needed
+ """
+
+ # Check if existing snapshot schedule has auto_delete = True and
+ # playbook sets desired retention without mentioning auto_delete
+ if schedule_details['rules'][0]['is_auto_delete'] and\
+ self.module.params['desired_retention']\
+ and self.module.params['auto_delete'] is None:
+ self.module.fail_json(msg="Desired retention cannot be "
+ "specified when auto_delete is true"
+ )
+ if schedule_details['rules'][0]['retention_time'] and \
+ self.module.params['auto_delete']:
+ self.module.fail_json(msg="auto_delete cannot be specified when"
+ " existing desired retention is set")
+
+ desired_rule_type = get_schedule_value(self.module.params['type'])
+ existing_rule_string = schedule_details['rules'][0][
+ 'type'].split(".")[1]
+ existing_rule_type = utils.ScheduleTypeEnum[
+ existing_rule_string]._get_properties()['value']
+ modified = False
+
+ # Check if rule type is modified
+ if desired_rule_type != existing_rule_type:
+ self.module.fail_json(msg="Modification of rule type is not"
+ " allowed.")
+
+ # Convert desired retention to seconds
+ duration_in_sec = convert_retention_to_seconds(
+ self.module.params['desired_retention'],
+ self.module.params['retention_unit'])
+
+ if not duration_in_sec:
+ duration_in_sec = schedule_details['rules'][0]['retention_time']
+
+        # Check if the common rule parameters are getting modified
+ if (duration_in_sec and duration_in_sec != schedule_details[
+ 'rules'][0]['retention_time']):
+ modified = True
+ elif (self.module.params['auto_delete'] is not None and
+ self.module.params['auto_delete'] != schedule_details['rules']
+ [0]['is_auto_delete']):
+ modified = True
+
+ if (self.module.params['minute'] is not None and self.module.params[
+ 'minute'] != schedule_details['rules'][0]['minute']):
+ modified = True
+
+ if not modified and desired_rule_type == 0:
+ if (self.module.params['interval'] and self.module.params[
+ 'interval'] != schedule_details['rules'][0]['interval']):
+ modified = True
+ elif not modified and desired_rule_type == 1:
+ if (self.module.params['hours_of_day'] and
+ set(self.module.params['hours_of_day']) !=
+ set(schedule_details['rules'][0]['hours'])):
+ modified = True
+ elif not modified and desired_rule_type == 2:
+ if (self.module.params['day_interval'] and self.module.params[
+ 'day_interval'] != schedule_details['rules'][0]['interval'])\
+ or (self.module.params['hour'] is not None and
+ self.module.params['hour'] != schedule_details[
+ 'rules'][0]['hours'][0]):
+ modified = True
+ elif not modified and desired_rule_type == 3:
+ days = schedule_details['rules'][0]['days_of_week'][
+ 'DayOfWeekEnumList']
+ existing_days = list()
+
+ for day in days:
+ temp = day.split(".")
+ existing_days.append(temp[1])
+
+ if (self.module.params['days_of_week'] and
+ set(self.module.params['days_of_week']) !=
+ set(existing_days)) or\
+ (self.module.params['hour'] is not None and
+ self.module.params['hour'] != schedule_details['rules'][
+ 0]['hours'][0]):
+ modified = True
+ elif not modified and desired_rule_type == 4:
+ if (self.module.params['day_of_month'] and self.module.params[
+ 'day_of_month'] != schedule_details['rules'][0][
+ 'days_of_month'][0]) or\
+ (self.module.params['hour'] is not None and
+ self.module.params['hour'] != schedule_details['rules'][
+ 0]['hours'][0]):
+ modified = True
+ LOG.info("Modify Flag: %s", modified)
+ return modified
+
+ def get_days_of_week_enum(self, days_of_week):
+ """Get the enum for days of week.
+ :param days_of_week: The list of days of week
+ :return: The list of days_of_week enum
+ """
+
+ days_of_week_enum = []
+ for day in days_of_week:
+ if day in utils.DayOfWeekEnum.__members__:
+ days_of_week_enum.append(utils.DayOfWeekEnum[day])
+ else:
+ errormsg = "Invalid choice {0} for days of week".format(day)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ return days_of_week_enum
+
+ def create_rule(self, type, interval, hours_of_day, day_interval,
+ days_of_week, day_of_month, hour, minute,
+ desired_retention, retention_unit, auto_delete,
+ schedule_details=None):
+ """Create the rule."""
+
+ duration_in_sec = None
+ if desired_retention:
+ duration_in_sec = convert_retention_to_seconds(desired_retention,
+ retention_unit)
+
+ if not duration_in_sec and schedule_details:
+ duration_in_sec = schedule_details['rules'][0]['retention_time']
+
+ if hour is None and schedule_details is None:
+ hour = 0
+
+ if hour is None and schedule_details:
+ if schedule_details['rules'][0]['hours'] is not None:
+ hour = schedule_details['rules'][0]['hours'][0]
+
+ if minute is None and schedule_details is None:
+ minute = 0
+
+ if minute is None and schedule_details:
+ minute = schedule_details['rules'][0]['minute']
+
+ try:
+ if type == "every_n_hours":
+ if not interval:
+ interval = schedule_details['rules'][0]['interval']
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_n_hours(hour_interval=interval, minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_day":
+ if not hours_of_day:
+ hours_of_day = schedule_details['rules'][0]['hours']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_day(hours=hours_of_day, minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_n_days":
+ if not day_interval:
+ day_interval = schedule_details['rules'][0]['interval']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_n_days(day_interval=day_interval, hour=hour,
+ minute=minute,
+ retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ elif type == "every_week":
+ if days_of_week:
+ days_of_week_enum = self.get_days_of_week_enum(days_of_week)
+ else:
+ days = schedule_details['rules'][0]['days_of_week'][
+ 'DayOfWeekEnumList']
+ existing_days = list()
+
+ for day in days:
+ temp = day.split(".")
+ existing_days.append(temp[1])
+                    # Reuse the days from the existing rule when none given.
+                    days_of_week_enum = self.get_days_of_week_enum(
+                        existing_days)
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_week(days_of_week=days_of_week_enum, hour=hour,
+ minute=minute, retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+ else:
+ if day_of_month:
+ day_of_month_list = [day_of_month]
+ else:
+ day_of_month_list = schedule_details['rules'][0][
+ 'days_of_month']
+
+ rule_dict = utils.snap_schedule.UnitySnapScheduleRule.\
+ every_month(days_of_month=day_of_month_list, hour=hour,
+ minute=minute, retention_time=duration_in_sec,
+ is_auto_delete=auto_delete)
+
+ return rule_dict
+
+ except Exception as e:
+ errormsg = "Create operation of snapshot schedule rule " \
+ " failed with error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_snapshot_schedule(self, name, rule_dict):
+ """Create snapshot schedule.
+ :param name: The name of the snapshot schedule
+ :param rule_dict: The dict of the rule
+ :return: Boolean value to indicate if snapshot schedule created
+ """
+
+ try:
+ utils.snap_schedule.UnitySnapSchedule.create(
+ cli=self.unity_conn._cli, name=name, rules=[rule_dict])
+ return True
+
+ except Exception as e:
+ errormsg = "Create operation of snapshot schedule {0} failed" \
+ " with error {1}".format(name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_desired_retention(self, desired_retention, retention_unit):
+ """Validates the specified desired retention.
+ :param desired_retention: Desired retention of the snapshot
+ schedule
+ :param retention_unit: Retention unit for the snapshot schedule
+ """
+
+ if retention_unit == 'hours' and (desired_retention < 1 or
+ desired_retention > 744):
+ self.module.fail_json(msg="Please provide a valid integer as the"
+ " desired retention between 1 and 744.")
+ elif retention_unit == 'days' and (desired_retention < 1 or
+ desired_retention > 31):
+ self.module.fail_json(msg="Please provide a valid integer as the"
+ " desired retention between 1 and 31.")
+
+ def return_schedule_instance(self, id):
+ """Return the snapshot schedule instance
+ :param id: The id of the snapshot schedule
+ :return: Instance of the snapshot schedule
+ """
+
+ try:
+ obj_schedule = utils.snap_schedule.UnitySnapSchedule.get(
+ self.unity_conn._cli, id)
+ return obj_schedule
+
+ except Exception as e:
+ error_msg = "Failed to get the snapshot schedule {0} instance" \
+ " with error {1}".format(id, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def delete_snapshot_schedule(self, id):
+ """Delete snapshot schedule.
+ :param id: The ID of the snapshot schedule
+ :return: The boolean value to indicate if snapshot schedule
+ deleted
+ """
+
+ try:
+ obj_schedule = self.return_schedule_instance(id=id)
+ obj_schedule.delete()
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of snapshot schedule id:{0} failed" \
+ " with error {1}".format(id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_snapshot_schedule(self, id, schedule_details):
+ """Modify snapshot schedule details.
+ :param id: The id of the snapshot schedule
+ :param schedule_details: The dict containing schedule details
+ :return: The boolean value to indicate if snapshot schedule
+ modified
+ """
+
+ try:
+ obj_schedule = self.return_schedule_instance(id=id)
+ rule_id = schedule_details['rules'][0]['id']
+
+ if self.module.params['auto_delete'] is None:
+ auto_delete = schedule_details['rules'][0]['is_auto_delete']
+ else:
+ auto_delete = self.module.params['auto_delete']
+
+ if schedule_details['rules'][0]['is_auto_delete'] and\
+ self.module.params['desired_retention'] and\
+ self.module.params['auto_delete'] is False:
+ auto_delete = False
+ elif schedule_details['rules'][0]['retention_time']:
+ auto_delete = None
+
+ rule_dict = self.create_rule(
+ self.module.params['type'], self.module.params['interval'],
+ self.module.params['hours_of_day'],
+ self.module.params['day_interval'],
+ self.module.params['days_of_week'],
+ self.module.params['day_of_month'],
+ self.module.params['hour'], self.module.params['minute'],
+ self.module.params['desired_retention'],
+ self.module.params['retention_unit'], auto_delete,
+ schedule_details)
+
+ obj_schedule.modify(add_rules=[rule_dict],
+ remove_rule_ids=[rule_id])
+ return True
+ except Exception as e:
+ errormsg = "Modify operation of snapshot schedule id:{0} failed" \
+ " with error {1}".format(id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_details(self, id=None, name=None):
+ """Get snapshot schedule details.
+ :param id: The id of the snapshot schedule
+ :param name: The name of the snapshot schedule
+ :return: Dict containing snapshot schedule details if exists
+ """
+
+ id_or_name = id if id else name
+ errormsg = "Failed to get details of snapshot schedule {0} with" \
+ " error {1}"
+ try:
+ if not id:
+ details = utils.snap_schedule.UnitySnapScheduleList.get(
+ self.unity_conn._cli, name=name)
+
+ if details:
+ id = details[0].id
+
+ if id:
+ details = self.unity_conn.get_snap_schedule(_id=id)
+
+ if id and details.existed:
+ rule_list = [rules._get_properties() for rules in
+ details.rules]
+ for rule in rule_list:
+ rule['retention_time_in_hours'] = int(
+ rule['retention_time'] / 3600)
+ rule['rule_type'] = get_rule_type(rule['type'])
+ schedule_details = details._get_properties()
+ schedule_details['rules'] = rule_list
+ return schedule_details
+ else:
+ LOG.info("Failed to get the snapshot schedule %s", id_or_name)
+ return None
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ auth_err = "Incorrect username or password, {0}".format(
+ e.message)
+ msg = errormsg.format(id_or_name, auth_err)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ return None
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def validate_parameters(self):
+ """Validate the parameters."""
+
+ try:
+ if self.module.params['interval'] is not None and\
+ self.module.params['interval'] <= 0:
+ self.module.fail_json(msg="Interval can not be less than or"
+ " equal to 0.")
+
+ param_list = ['day_interval', 'day_of_month']
+ for param in param_list:
+ if self.module.params[param] is not None and\
+ self.module.params[param] == 0:
+ self.module.fail_json(msg="{0} can not be 0.".format(
+ param))
+
+ except Exception as e:
+ errormsg = "Failed to validate the module param with error" \
+ " {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on snapshot schedule module based on
+ parameters chosen in playbook
+ """
+ name = self.module.params['name']
+ id = self.module.params['id']
+ type = self.module.params['type']
+ interval = self.module.params['interval']
+ hours_of_day = self.module.params['hours_of_day']
+ day_interval = self.module.params['day_interval']
+ days_of_week = self.module.params['days_of_week']
+ day_of_month = self.module.params['day_of_month']
+ hour = self.module.params['hour']
+ minute = self.module.params['minute']
+ desired_retention = self.module.params['desired_retention']
+ retention_unit = self.module.params['retention_unit']
+ auto_delete = self.module.params['auto_delete']
+ state = self.module.params['state']
+
+ # result is a dictionary that contains changed status and snapshot
+ # schedule details
+ result = dict(
+ changed=False,
+ snapshot_schedule_details={}
+ )
+
+ self.validate_parameters()
+
+ if desired_retention is not None:
+ self.validate_desired_retention(desired_retention, retention_unit)
+
+ if auto_delete and desired_retention:
+ self.module.fail_json(msg="Desired retention cannot be "
+ "specified when auto_delete is true"
+ )
+
+ schedule_details = self.get_details(name=name, id=id)
+
+ if not id and schedule_details:
+ id = schedule_details['id']
+
+ if state == 'present' and not schedule_details:
+ if not name:
+ msg = "The parameter name length is 0. It is too short." \
+ " The min length is 1."
+ self.module.fail_json(msg=msg)
+
+ if not type:
+ self.module.fail_json(msg="Rule type is necessary to create"
+ " snapshot schedule")
+
+ if type == "every_n_hours" and interval is None:
+ self.module.fail_json(msg="To create snapshot schedule with"
+ " rule type every_n_hours, interval"
+ " is the mandatory parameter.")
+ elif type == "every_day" and hours_of_day is None:
+ self.module.fail_json(msg="To create snapshot schedule with"
+ " rule type every_day, hours_of_day"
+ " is the mandatory parameter.")
+ elif type == "every_n_days" and day_interval is None:
+ self.module.fail_json(msg="To create snapshot schedule with"
+ " rule type every_n_days,"
+ " day_interval is the mandatory"
+ " parameter.")
+ elif type == "every_week" and days_of_week is None:
+ self.module.fail_json(msg="To create snapshot schedule with"
+ " rule type every_week,"
+ " days_of_week is the mandatory"
+ " parameter.")
+ elif type == "every_month" and day_of_month is None:
+ self.module.fail_json(msg="To create snapshot schedule with"
+ " rule type every_month,"
+ " day_of_month is the mandatory"
+ " parameter.")
+
+ rule_dict = self.create_rule(type, interval, hours_of_day,
+ day_interval, days_of_week,
+ day_of_month, hour, minute,
+ desired_retention, retention_unit,
+ auto_delete)
+ result['changed'] = self.create_snapshot_schedule(name, rule_dict)
+
+ elif state == 'absent' and schedule_details:
+ result['changed'] = self.delete_snapshot_schedule(id)
+
+ if state == 'present' and type and schedule_details and\
+ len(schedule_details['rules']) == 1:
+ if (self.schedule_modify_required(schedule_details)):
+ result['changed'] = self.modify_snapshot_schedule(
+ id, schedule_details)
+
+ result['snapshot_schedule_details'] = self.get_details(name=name,
+ id=id)
+ self.module.exit_json(**result)
+
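The chain of per-type checks above encodes a simple table: each rule type has exactly one mandatory companion parameter. A minimal standalone sketch of that relationship (REQUIRED_BY_TYPE and missing_required_param are illustrative names, not part of the module):

    REQUIRED_BY_TYPE = {
        "every_n_hours": "interval",
        "every_day": "hours_of_day",
        "every_n_days": "day_interval",
        "every_week": "days_of_week",
        "every_month": "day_of_month",
    }

    def missing_required_param(rule_type, params):
        """Return the name of the missing mandatory parameter, or None."""
        required = REQUIRED_BY_TYPE.get(rule_type)
        if required is not None and params.get(required) is None:
            return required
        return None

    assert missing_required_param("every_week", {"interval": 4}) == "days_of_week"
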
+
+def get_rule_type(type):
+ """Get the rule type of schedule.
+ :param type: The schedule type enum
+ :return: The rule type of snapshot schedule
+ """
+
+ schedule_type = {
+ "ScheduleTypeEnum.N_HOURS_AT_MM": "every_n_hours",
+ "ScheduleTypeEnum.DAY_AT_HHMM": "every_day",
+ "ScheduleTypeEnum.N_DAYS_AT_HHMM": "every_n_days",
+ "ScheduleTypeEnum.SELDAYS_AT_HHMM": "every_week",
+ "ScheduleTypeEnum.NTH_DAYOFMONTH_AT_HHMM": "every_month"
+ }
+
+ return schedule_type.get(type)
+
+
+def get_schedule_value(type):
+ """Get the enum for schedule.
+ :param type: The type of rule
+ :return: The enum value for rule
+ """
+
+ rule_type = {
+ "every_n_hours": 0,
+ "every_day": 1,
+ "every_n_days": 2,
+ "every_week": 3,
+ "every_month": 4
+ }
+
+ return rule_type.get(type)
+
+
+def convert_retention_to_seconds(desired_retention, retention_unit):
+ """Convert desired retention to seconds.
+ :param desired_retention: The desired retention for snapshot
+ schedule
+ :param retention_unit: The retention unit for snapshot schedule
+ :return: The integer value in seconds
+ """
+
+ duration_in_sec = None
+ if desired_retention:
+ if retention_unit == 'hours':
+ duration_in_sec = desired_retention * 60 * 60
+ else:
+ duration_in_sec = desired_retention * 24 * 60 * 60
+ return duration_in_sec
+
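A quick check of the arithmetic, independent of the module: 3600 seconds per hour, 86400 per day, and the inverse hour conversion used earlier when reporting retention_time_in_hours:

    assert 2 * 60 * 60 == 7200          # 2 hours in seconds
    assert 1 * 24 * 60 * 60 == 86400    # 1 day in seconds
    assert int(7200 / 3600) == 2        # seconds back to hours for display
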
+
+def get_snapshotschedule_parameters():
+ """This method provide parameters required for the ansible snapshot
+ schedule module on Unity"""
+
+ return dict(
+ name=dict(type='str'),
+ id=dict(type='str'),
+ type=dict(type='str', choices=['every_n_hours', 'every_day',
+ 'every_n_days', 'every_week',
+ 'every_month']),
+ interval=dict(type='int'),
+ hours_of_day=dict(type='list', elements='int'),
+ day_interval=dict(type='int'),
+ days_of_week=dict(type='list', elements='str',
+ choices=['SUNDAY', 'MONDAY', 'TUESDAY', 'WEDNESDAY',
+ 'THURSDAY', 'FRIDAY', 'SATURDAY']),
+ day_of_month=dict(type='int'),
+ hour=dict(type='int'),
+ minute=dict(type='int'),
+ desired_retention=dict(type='int'),
+ retention_unit=dict(type='str', choices=['hours', 'days'],
+ default='hours'),
+ auto_delete=dict(type='bool'),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity snapshot schedule object and perform action on it
+ based on user input from playbook"""
+ obj = SnapshotSchedule()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/storagepool.py b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
new file mode 100644
index 000000000..ddb7eef65
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing storage pool on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: storagepool
+version_added: '1.1.0'
+short_description: Manage storage pool on Unity
+description:
+- Managing storage pool on Unity storage system includes
+  Get details of storage pool,
+  Create a storage pool and
+  Modify storage pool.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Ambuj Dubey (@AmbujDube) <ansible.team@dell.com>
+
+options:
+ pool_name:
+ description:
+ - Name of the storage pool, unique in the storage system.
+ type: str
+
+ pool_id:
+ description:
+ - Unique identifier of the pool instance.
+ type: str
+
+ new_pool_name:
+ description:
+ - New name of the storage pool, unique in the storage system.
+ type: str
+
+ pool_description:
+ description:
+ - The description of the storage pool.
+ type: str
+
+ fast_cache:
+ description:
+ - Indicates whether the fast cache is enabled for the storage pool.
+  - C(enabled) - FAST Cache is enabled for the pool.
+  - C(disabled) - FAST Cache is disabled for the pool.
+ choices: [enabled, disabled]
+ type: str
+
+ fast_vp:
+ description:
+ - Indicates whether to enable scheduled data relocations for the pool.
+  - C(enabled) - Enabled scheduled data relocations for the pool.
+  - C(disabled) - Disabled scheduled data relocations for the pool.
+ choices: [enabled, disabled]
+ type: str
+
+ raid_groups:
+ description:
+ - Parameters to create RAID group from the disks and add it to the pool.
+ type: dict
+ suboptions:
+ disk_group_id:
+ description:
+ - Id of the disk group.
+ type: str
+
+ disk_num:
+ description:
+ - Number of disks.
+ type: int
+
+ raid_type:
+ description:
+ - RAID group types or RAID levels.
+ choices: [None, RAID5, RAID0, RAID1, RAID3, RAID10, RAID6, Mixed, Automatic]
+ type: str
+
+    stripe_width:
+ description:
+ - RAID group stripe widths, including parity or mirror disks.
+ choices: ['BEST_FIT', '2', '4', '5', '6', '8', '9', '10', '12', '13', '14', '16']
+ type: str
+
+ alert_threshold:
+ description:
+ - Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage.
+ - Minimum threshold limit is 50.
+ - Maximum threshold limit is 84.
+ type: int
+
+ is_harvest_enabled:
+ description:
+ - Enable/Disable automatic deletion of snapshots based on pool space usage.
+ type: bool
+
+ pool_harvest_high_threshold:
+ description:
+ - Max threshold for space used in pool beyond which the system automatically starts deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+ - Minimum pool harvest high threshold value is 1.
+ - Maximum pool harvest high threshold value is 99.
+ type: float
+
+ pool_harvest_low_threshold:
+ description:
+ - Min threshold for space used in pool below which the system automatically stops deletion of snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the system and pool.
+ - Minimum pool harvest low threshold value is 0.
+ - Maximum pool harvest low threshold value is 98.
+ type: float
+
+ is_snap_harvest_enabled:
+ description:
+  - Enable/Disable automatic deletion of snapshots based on snapshot space usage.
+ type: bool
+
+ snap_harvest_high_threshold:
+ description:
+ - Max threshold for space used in snapshot beyond which the system automatically starts deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+ - Minimum snap harvest high threshold value is 1.
+ - Maximum snap harvest high threshold value is 99.
+ type: float
+
+ snap_harvest_low_threshold:
+ description:
+ - Min threshold for space used in snapshot below which the system will stop automatically deleting snapshots in the pool.
+ - Applies when the automatic deletion of snapshots based on pool space usage is enabled for the pool.
+ - Minimum snap harvest low threshold value is 0.
+ - Maximum snap harvest low threshold value is 98.
+ type: float
+
+ pool_type:
+ description:
+ - Indicates storage pool type.
+ choices: [TRADITIONAL, DYNAMIC]
+ type: str
+
+ state:
+ description:
+ - Define whether the storage pool should exist or not.
+  - C(present) - indicates that the storage pool should exist on the system.
+  - C(absent) - indicates that the storage pool should not exist on the system.
+ choices: [absent, present]
+ type: str
+ required: true
+
+notes:
+- Deletion of storage pool is not allowed through the Ansible module.
+- The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+- name: Get Storage pool details using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ state: "present"
+
+- name: Get Storage pool details using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ state: "present"
+
+- name: Modify Storage pool attributes using pool_name
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "{{pool_name}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+- name: Modify Storage pool attributes using pool_id
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_id: "{{pool_id}}"
+ new_pool_name: "{{new_pool_name}}"
+ pool_description: "{{pool_description}}"
+ fast_cache: "{{fast_cache_enabled}}"
+ fast_vp: "{{fast_vp_enabled}}"
+ state: "present"
+
+- name: Create a StoragePool
+ dellemc.unity.storagepool:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ pool_name: "Test"
+ pool_description: "test pool"
+ raid_groups:
+ disk_group_id : "dg_16"
+ disk_num : 2
+ raid_type : "RAID10"
+ stripe_width : "BEST_FIT"
+ alert_threshold : 50
+ is_harvest_enabled : True
+ pool_harvest_high_threshold : 60
+ pool_harvest_low_threshold : 40
+ is_snap_harvest_enabled : True
+ snap_harvest_high_threshold : 70
+ snap_harvest_low_threshold : 50
+ fast_vp: "enabled"
+ fast_cache: "enabled"
+ pool_type : "DYNAMIC"
+ state: "present"
+
+'''
+
+RETURN = r'''
+ changed:
+ description: Whether or not the storage pool has changed.
+ returned: always
+ type: bool
+ sample: True
+
+ storage_pool_details:
+ description: The storage pool details.
+ returned: When storage pool exists.
+ type: dict
+ contains:
+ id:
+ description: Pool id, unique identifier of the pool.
+ type: str
+ name:
+ description: Pool name, unique in the storage system.
+ type: str
+ is_fast_cache_enabled:
+ description: Indicates whether the fast cache is enabled for the storage
+ pool.
+ true - FAST Cache is enabled for the pool.
+ false - FAST Cache is disabled for the pool.
+ type: bool
+ is_fast_vp_enabled:
+ description: Indicates whether to enable scheduled data relocations
+ for the storage pool.
+ true - Enabled scheduled data relocations for the pool.
+ false - Disabled scheduled data relocations for the pool.
+ type: bool
+ size_free_with_unit:
+ description: Indicates size_free with its appropriate unit
+ in human readable form.
+ type: str
+ size_subscribed_with_unit:
+ description: Indicates size_subscribed with its appropriate unit in
+ human readable form.
+ type: str
+ size_total_with_unit:
+ description: Indicates size_total with its appropriate unit in human
+ readable form.
+ type: str
+ size_used_with_unit:
+ description: Indicates size_used with its appropriate unit in human
+ readable form.
+ type: str
+ snap_size_subscribed_with_unit:
+ description: Indicates snap_size_subscribed with its
+ appropriate unit in human readable form.
+ type: str
+ snap_size_used_with_unit:
+ description: Indicates snap_size_used with its
+ appropriate unit in human readable form.
+ type: str
+ drives:
+ description: Indicates information about the drives
+ associated with the storage pool.
+ type: list
+ contains:
+ id:
+ description: Unique identifier of the drive.
+ type: str
+ name:
+ description: Indicates name of the drive.
+ type: str
+ size:
+ description: Indicates size of the drive.
+ type: str
+ disk_technology:
+ description: Indicates disk technology of the drive.
+ type: str
+ tier_type:
+ description: Indicates tier type of the drive.
+ type: str
+ sample: {
+ "alert_threshold": 50,
+ "creation_time": "2022-03-08 14:05:32+00:00",
+ "description": "",
+ "drives": [
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_22",
+ "name": "DPE Drive 22",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ },
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_23",
+ "name": "DPE Drive 23",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ },
+ {
+ "disk_technology": "SAS",
+ "id": "dpe_disk_24",
+ "name": "DPE Drive 24",
+ "size": 590860984320,
+ "tier_type": "PERFORMANCE"
+ }
+ ],
+ "existed": true,
+ "harvest_state": "UsageHarvestStateEnum.IDLE",
+ "hash": 8744642897210,
+ "health": {
+ "UnityHealth": {
+ "hash": 8744642799842
+ }
+ },
+ "id": "pool_280",
+ "is_all_flash": false,
+ "is_empty": false,
+ "is_fast_cache_enabled": false,
+ "is_fast_vp_enabled": false,
+ "is_harvest_enabled": true,
+ "is_snap_harvest_enabled": true,
+ "metadata_size_subscribed": 105763569664,
+ "metadata_size_used": 57176752128,
+ "name": "test_pool",
+ "object_id": 12884902146,
+ "pool_fast_vp": {
+ "UnityPoolFastVp": {
+ "hash": 8744647518980
+ }
+ },
+ "pool_space_harvest_high_threshold": 59.0,
+ "pool_space_harvest_low_threshold": 40.0,
+ "pool_type": "StoragePoolTypeEnum.DYNAMIC",
+ "raid_type": "RaidTypeEnum.RAID10",
+ "rebalance_progress": null,
+ "size_free": 470030483456,
+ "size_free_with_unit": "437.75 GB",
+ "size_subscribed": 447215820800,
+ "size_subscribed_with_unit": "416.5 GB",
+ "size_total": 574720311296,
+ "size_total_with_unit": "535.25 GB",
+ "size_used": 76838068224,
+ "size_used_with_unit": "71.56 GB",
+ "snap_size_subscribed": 128851369984,
+ "snap_size_subscribed_with_unit": "120.0 GB",
+ "snap_size_used": 2351104,
+ "snap_size_used_with_unit": "2.24 MB",
+ "snap_space_harvest_high_threshold": 80.0,
+ "snap_space_harvest_low_threshold": 60.0,
+ "tiers": {
+ "UnityPoolTierList": [
+ {
+ "disk_count": [
+ 0,
+ 3,
+ 0
+ ],
+ "existed": true,
+ "hash": 8744643017382,
+ "name": [
+ "Extreme Performance",
+ "Performance",
+ "Capacity"
+ ],
+ "pool_units": [
+ null,
+ {
+ "UnityPoolUnitList": [
+ {
+ "UnityPoolUnit": {
+ "hash": 8744642786759,
+ "id": "rg_4"
+ }
+ },
+ {
+ "UnityPoolUnit": {
+ "hash": 8744642786795,
+ "id": "rg_5"
+ }
+ }
+ ]
+ },
+ null
+ ],
+ "raid_type": [
+ "RaidTypeEnum.NONE",
+ "RaidTypeEnum.RAID10",
+ "RaidTypeEnum.NONE"
+ ],
+ "size_free": [
+ 0,
+ 470030483456,
+ 0
+ ],
+ "size_moving_down": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_moving_up": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_moving_within": [
+ 0,
+ 0,
+ 0
+ ],
+ "size_total": [
+ 0,
+ 574720311296,
+ 0
+ ],
+ "size_used": [
+ 0,
+ 104689827840,
+ 0
+ ],
+ "stripe_width": [
+ null,
+ "RaidStripeWidthEnum._2",
+ null
+ ],
+ "tier_type": [
+ "TierTypeEnum.EXTREME_PERFORMANCE",
+ "TierTypeEnum.PERFORMANCE",
+ "TierTypeEnum.CAPACITY"
+ ]
+ }
+ ]
+ }
+ }
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import logging
+
+LOG = utils.get_logger('storagepool')
+
+application_type = "Ansible/1.6.0"
+
+
+class StoragePool(object):
+ """Class with storage pool operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_storagepool_parameters())
+
+ mutually_exclusive = [['pool_name', 'pool_id']]
+ required_one_of = [['pool_name', 'pool_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ self.conn = utils.\
+ get_unity_unisphere_connection(self.module.params, application_type)
+
+ def get_details(self, pool_id=None, pool_name=None):
+ """ Get storage pool details"""
+ try:
+ api_response = self.conn.get_pool(_id=pool_id, name=pool_name)
+ details = api_response._get_properties()
+
+ is_fast_vp_enabled = api_response._get_property_from_raw(
+ 'pool_fast_vp').is_schedule_enabled
+ details['is_fast_vp_enabled'] = is_fast_vp_enabled
+
+ details['size_free_with_unit'] = utils.\
+ convert_size_with_unit(int(details['size_free']))
+
+ details['size_subscribed_with_unit'] = utils.\
+ convert_size_with_unit(int(details['size_subscribed']))
+
+ details['size_total_with_unit'] = utils.\
+ convert_size_with_unit(int(details['size_total']))
+
+ details['size_used_with_unit'] = utils.\
+ convert_size_with_unit(int(details['size_used']))
+
+ details['snap_size_subscribed_with_unit'] = utils.\
+ convert_size_with_unit(int(details['snap_size_subscribed']))
+
+ details['snap_size_used_with_unit'] = utils.\
+ convert_size_with_unit(int(details['snap_size_used']))
+
+ pool_instance = utils.UnityPool.get(self.conn._cli, details['id'])
+ pool_tier_list = []
+ pool_tier_list.append((pool_instance.tiers)._get_properties())
+ pool_tier_dict = {}
+ pool_tier_dict['UnityPoolTierList'] = pool_tier_list
+ details['tiers'] = pool_tier_dict
+ return details
+ except Exception as e:
+ error = str(e)
+ check_list = ['not found', 'no attribute']
+ if any(ele in error for ele in check_list):
+ error_message = "pool details are not found"
+ LOG.info(error_message)
+ return None
+ error_message = 'Get details of storage pool failed with ' \
+ 'error: {0}'.format(str(e))
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
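get_details() above decorates the raw byte counters with *_with_unit strings such as "437.75 GB" in the sample output. utils.convert_size_with_unit is not part of this diff; a plausible standalone equivalent, assuming binary (1024-based) multiples and two decimal places (the collection's helper may differ in detail, for example by trimming trailing zeros):

    def convert_size_with_unit(size_in_bytes):
        """Format a byte count as a human readable string."""
        size = float(size_in_bytes)
        for unit in ("B", "KB", "MB", "GB", "TB", "PB"):
            if size < 1024 or unit == "PB":
                return "%.2f %s" % (size, unit)
            size /= 1024.0

    print(convert_size_with_unit(470030483456))  # -> 437.75 GB
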
+ def is_pool_modification_required(self, storage_pool_details):
+ """ Check if attributes of storage pool needs to be modified
+ """
+ try:
+ if self.module.params['new_pool_name'] and \
+ self.module.params['new_pool_name'] != \
+ storage_pool_details['name']:
+ return True
+
+ if self.module.params['pool_description'] is not None and \
+ self.module.params['pool_description'] != \
+ storage_pool_details['description']:
+ return True
+
+ if self.module.params['fast_cache']:
+ if (self.module.params['fast_cache'] == "enabled" and
+ not storage_pool_details['is_fast_cache_enabled']) or\
+ (self.module.params['fast_cache'] == "disabled" and storage_pool_details['is_fast_cache_enabled']):
+ return True
+
+ if self.module.params['fast_vp']:
+ if (self.module.params['fast_vp'] == "enabled" and
+ not storage_pool_details['is_fast_vp_enabled']) or \
+ (self.module.params['fast_vp'] == "disabled" and
+ storage_pool_details['is_fast_vp_enabled']):
+ return True
+
+ LOG.info("modify not required")
+ return False
+
+ except Exception as e:
+            error_message = 'Failed to determine if any modification is'\
+                ' required for pool attributes with error: {0}'.format(str(e))
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def pool_modify(self, id, new_pool_name,
+ pool_description, fast_cache, fast_vp):
+ """ Modify attributes of storage pool """
+ pool_obj = utils.UnityPool.get(self.conn._cli, id)
+ try:
+ pool_obj.modify(name=new_pool_name, description=pool_description,
+ is_fast_cache_enabled=fast_cache,
+ is_fastvp_enabled=fast_vp)
+ new_storage_pool_details = self.get_details(pool_id=id,
+ pool_name=None)
+ LOG.info("Modification Successful")
+ return new_storage_pool_details
+ except Exception as e:
+ if self.module.params['pool_id']:
+ pool_identifier = self.module.params['pool_id']
+ else:
+ pool_identifier = self.module.params['pool_name']
+ error_message = 'Modify attributes of storage pool {0} ' \
+ 'failed with error: {1}'.format(pool_identifier, str(e))
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_pool_drives(self, pool_id=None, pool_name=None):
+ """ Get pool drives attached to pool"""
+ pool_identifier = pool_id or pool_name
+ pool_drives_list = []
+ try:
+ drive_instances = utils.UnityDiskList.get(self.conn._cli)
+ if drive_instances:
+ for drive in drive_instances:
+ if drive.pool and (drive.pool.id == pool_identifier or drive.pool.name == pool_identifier):
+ pool_drive = {"id": drive.id, "name": drive.name, "size": drive.size,
+ "disk_technology": drive.disk_technology.name,
+ "tier_type": drive.tier_type.name}
+ pool_drives_list.append(pool_drive)
+ LOG.info("Successfully retrieved pool drive details")
+ return pool_drives_list
+ except Exception as e:
+ error_message = 'Get details of pool drives failed with ' \
+ 'error: {0}'.format(str(e))
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def get_raid_type_enum(self, raid_type):
+ """ Get raid_type_enum.
+ :param raid_type: The raid_type
+ :return: raid_type enum
+ """
+
+ if raid_type in utils.RaidTypeEnum.__members__:
+ return utils.RaidTypeEnum[raid_type]
+ else:
+ errormsg = "Invalid choice %s for Raid Type" % raid_type
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_raid_stripe_width_enum(self, stripe_width):
+ """ Get raid_stripe_width enum.
+ :param stripe_width: The raid_stripe_width
+ :return: raid_stripe_width enum
+ """
+ if stripe_width != "BEST_FIT":
+ stripe_width = "_" + stripe_width
+ if stripe_width in utils.RaidStripeWidthEnum.__members__:
+ return utils.RaidStripeWidthEnum[stripe_width]
+ else:
+ errormsg = "Invalid choice %s for stripe width" % stripe_width
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
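get_raid_stripe_width_enum() above bridges the playbook-facing choices ('2', '4', ..., 'BEST_FIT') and enum member names, which cannot start with a digit, by prefixing numeric widths with an underscore before the lookup. The same pattern in self-contained form; this RaidStripeWidthEnum is a stand-in with only a few members, not the storops enum:

    from enum import Enum

    class RaidStripeWidthEnum(Enum):
        BEST_FIT = 0xFFFF  # placeholder value
        _2 = 2
        _4 = 4

    def to_stripe_width_enum(stripe_width):
        name = stripe_width if stripe_width == "BEST_FIT" else "_" + stripe_width
        if name in RaidStripeWidthEnum.__members__:
            return RaidStripeWidthEnum[name]
        raise ValueError("Invalid choice %s for stripe width" % stripe_width)

    assert to_stripe_width_enum("2") is RaidStripeWidthEnum._2
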
+ def get_pool_type_enum(self, pool_type):
+ """ Get the storage pool_type enum.
+ :param pool_type: The pool_type
+ :return: pool_type enum
+ """
+
+ if pool_type == "TRADITIONAL":
+ return 1
+ elif pool_type == "DYNAMIC":
+ return 2
+ else:
+ errormsg = "Invalid choice %s for Storage Pool Type" % pool_type
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_raid_groups(self, raid_groups):
+ """ Get the raid groups for creating pool"""
+ try:
+ disk_obj = utils.UnityDiskGroup.get(self.conn._cli, _id=raid_groups['disk_group_id'])
+ disk_num = raid_groups['disk_num']
+ raid_type = raid_groups['raid_type']
+ raid_type = self.get_raid_type_enum(raid_type) \
+ if raid_type else None
+ stripe_width = raid_groups['stripe_width']
+ stripe_width = self.get_raid_stripe_width_enum(stripe_width) \
+ if stripe_width else None
+ raid_group = utils.RaidGroupParameter(disk_group=disk_obj,
+ disk_num=disk_num, raid_type=raid_type,
+ stripe_width=stripe_width)
+ raid_groups = [raid_group]
+ return raid_groups
+ except Exception as e:
+ error_message = 'Failed to create storage pool with error: %s' % str(e)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def validate_create_pool_params(self, alert_threshold=None,
+ pool_harvest_high_threshold=None,
+ pool_harvest_low_threshold=None,
+ snap_harvest_high_threshold=None,
+ snap_harvest_low_threshold=None):
+ """ Validates params for creating pool"""
+ if alert_threshold and (alert_threshold < 50 or alert_threshold > 84):
+ errormsg = "Alert threshold is not in the allowed value range of 50 - 84"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if pool_harvest_high_threshold and (pool_harvest_high_threshold < 1 or pool_harvest_high_threshold > 99):
+ errormsg = "Pool harvest high threshold is not in the allowed value range of 1 - 99"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if pool_harvest_low_threshold and (pool_harvest_low_threshold < 0 or pool_harvest_low_threshold > 98):
+ errormsg = "Pool harvest low threshold is not in the allowed value range of 0 - 98"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if snap_harvest_high_threshold and (snap_harvest_high_threshold < 1 or snap_harvest_high_threshold > 99):
+ errormsg = "Snap harvest high threshold is not in the allowed value range of 1 - 99"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+ if snap_harvest_low_threshold and (snap_harvest_low_threshold < 0 or snap_harvest_low_threshold > 98):
+ errormsg = "Snap harvest low threshold is not in the allowed value range of 0 - 98"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
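validate_create_pool_params() above repeats the same bounds check for each threshold; the rule it enforces can be stated once as a range table. A compact sketch (names are illustrative, and it uses an explicit None check where the method above relies on truthiness, so a value of 0 is validated here rather than skipped):

    THRESHOLD_RANGES = {
        "alert_threshold": (50, 84),
        "pool_harvest_high_threshold": (1, 99),
        "pool_harvest_low_threshold": (0, 98),
        "snap_harvest_high_threshold": (1, 99),
        "snap_harvest_low_threshold": (0, 98),
    }

    def check_thresholds(params):
        for name, (low, high) in THRESHOLD_RANGES.items():
            value = params.get(name)
            if value is not None and not (low <= value <= high):
                raise ValueError("%s is not in the allowed value range"
                                 " of %d - %d" % (name, low, high))

    check_thresholds({"alert_threshold": 50})  # lower bound is valid
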
+ def create_pool(self, name, raid_groups):
+ """ Creates a StoragePool"""
+ try:
+ pool_obj = utils.UnityPool.get(self.conn._cli)
+ pool_description = self.module.params['pool_description']
+ raid_groups = self.get_raid_groups(raid_groups) \
+ if raid_groups else None
+ alert_threshold = self.module.params['alert_threshold']
+ pool_harvest_high_threshold = None
+ pool_harvest_low_threshold = None
+ snap_harvest_high_threshold = None
+ snap_harvest_low_threshold = None
+ is_harvest_enabled = self.module.params['is_harvest_enabled']
+ if is_harvest_enabled:
+ pool_harvest_high_threshold = self.module.params['pool_harvest_high_threshold']
+ pool_harvest_low_threshold = self.module.params['pool_harvest_low_threshold']
+ is_snap_harvest_enabled = self.module.params['is_snap_harvest_enabled']
+ if is_snap_harvest_enabled:
+ snap_harvest_high_threshold = self.module.params['snap_harvest_high_threshold']
+ snap_harvest_low_threshold = self.module.params['snap_harvest_low_threshold']
+ self.validate_create_pool_params(alert_threshold=alert_threshold,
+ pool_harvest_high_threshold=pool_harvest_high_threshold,
+ pool_harvest_low_threshold=pool_harvest_low_threshold,
+ snap_harvest_high_threshold=snap_harvest_high_threshold,
+ snap_harvest_low_threshold=snap_harvest_low_threshold)
+ pool_type = self.module.params['pool_type']
+ pool_type = self.get_pool_type_enum(pool_type) \
+ if pool_type else None
+ fast_vp = self.module.params['fast_vp']
+ if fast_vp:
+ if fast_vp == "enabled":
+ fast_vp = True
+ else:
+ fast_vp = False
+
+ pool_obj.create(self.conn._cli, name=name, description=pool_description, raid_groups=raid_groups,
+ alert_threshold=alert_threshold,
+ is_harvest_enabled=is_harvest_enabled,
+ is_snap_harvest_enabled=is_snap_harvest_enabled,
+ pool_harvest_high_threshold=pool_harvest_high_threshold,
+ pool_harvest_low_threshold=pool_harvest_low_threshold,
+ snap_harvest_high_threshold=snap_harvest_high_threshold,
+ snap_harvest_low_threshold=snap_harvest_low_threshold,
+ is_fastvp_enabled=fast_vp,
+ pool_type=pool_type)
+ LOG.info("Creation of storage pool successful")
+ storage_pool_details = self.get_details(pool_name=name)
+ changed = True
+ return changed, storage_pool_details
+ except Exception as e:
+ error_message = 'Failed to create storage pool with error: %s' % str(e)
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on storage pool module based on parameters
+ chosen in playbook
+ """
+ pool_name = self.module.params['pool_name']
+ pool_id = self.module.params['pool_id']
+ new_pool_name = self.module.params['new_pool_name']
+ pool_description = self.module.params['pool_description']
+ fast_cache = self.module.params['fast_cache']
+ fast_vp = self.module.params['fast_vp']
+ state = self.module.params['state']
+ raid_groups = self.module.params['raid_groups']
+ if fast_cache:
+ if fast_cache == "enabled":
+ fast_cache = True
+ else:
+ fast_cache = False
+
+ if fast_vp:
+ if fast_vp == "enabled":
+ fast_vp = True
+ else:
+ fast_vp = False
+
+ # result is a dictionary that contains changed status and storage pool details
+ result = dict(
+ changed=False,
+ storage_pool_details={}
+ )
+
+ storage_pool_details = self.get_details(pool_id, pool_name)
+ result['storage_pool_details'] = storage_pool_details
+
+ if state == 'absent' and storage_pool_details:
+ error_message = 'Deletion of storage pool is not allowed through'\
+                ' the Ansible module'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ # Create storage pool
+ if state == 'present' and not storage_pool_details:
+ if pool_name is not None and len(pool_name) != 0:
+ result['changed'], storage_pool_details \
+ = self.create_pool(name=pool_name, raid_groups=raid_groups)
+ result['storage_pool_details'] = storage_pool_details
+ else:
+ error_message = 'The parameter pool_name length is 0. It'\
+ ' is too short. The min length is 1'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ # Get pool drive details
+ if result['storage_pool_details']:
+ result['storage_pool_details']['drives'] = self.get_pool_drives(pool_id=pool_id, pool_name=pool_name)
+
+ if state == 'present' and storage_pool_details:
+ if new_pool_name is not None and len(new_pool_name) == 0:
+ error_message = 'The parameter new_pool_name length is 0. It'\
+ ' is too short. The min length is 1'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+ pool_modify_flag = self.\
+ is_pool_modification_required(storage_pool_details)
+ LOG.info("Storage pool modification flag %s",
+ str(pool_modify_flag))
+
+ if pool_modify_flag:
+ result['storage_pool_details'] = \
+ self.pool_modify(storage_pool_details['id'], new_pool_name,
+ pool_description, fast_cache, fast_vp)
+ result['changed'] = True
+ self.module.exit_json(**result)
+
+
+def get_storagepool_parameters():
+ """This method provides parameters required for the ansible storage pool
+ module on Unity"""
+ return dict(
+ pool_name=dict(required=False, type='str'),
+ pool_id=dict(required=False, type='str'),
+ new_pool_name=dict(required=False, type='str'),
+ pool_description=dict(required=False, type='str'),
+ fast_cache=dict(required=False, type='str', choices=['enabled',
+ 'disabled']),
+ fast_vp=dict(required=False, type='str', choices=['enabled',
+ 'disabled']),
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ raid_groups=dict(required=False, type='dict', options=dict(
+ disk_group_id=dict(required=False, type='str'),
+ disk_num=dict(required=False, type='int'),
+ raid_type=dict(required=False, type='str', choices=['None', 'RAID5', 'RAID0', 'RAID1', 'RAID3', 'RAID10',
+ 'RAID6', 'Mixed', 'Automatic']),
+ stripe_width=dict(required=False, type='str', choices=['BEST_FIT', '2', '4', '5',
+ '6', '8', '9', '10', '12', '13', '14', '16']))),
+ alert_threshold=dict(required=False, type='int'),
+ is_harvest_enabled=dict(required=False, type='bool'),
+ pool_harvest_high_threshold=dict(required=False, type='float'),
+ pool_harvest_low_threshold=dict(required=False, type='float'),
+ is_snap_harvest_enabled=dict(required=False, type='bool'),
+ snap_harvest_high_threshold=dict(required=False, type='float'),
+ snap_harvest_low_threshold=dict(required=False, type='float'),
+ pool_type=dict(required=False, type='str', choices=['TRADITIONAL', 'DYNAMIC'])
+ )
+
+
+def main():
+ """ Create Unity storage pool object and perform action on it
+ based on user input from playbook"""
+ obj = StoragePool()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
new file mode 100644
index 000000000..063834b45
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
@@ -0,0 +1,706 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing quota tree on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: tree_quota
+short_description: Manage quota tree on the Unity storage system
+description:
+- Managing Quota tree on the Unity storage system includes
+ Create quota tree,
+ Get quota tree,
+ Modify quota tree and
+ Delete quota tree.
+version_added: '1.2.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem for which quota tree is created.
+ - For creation or modification of a quota tree either I(filesystem_name) or
+ I(filesystem_id) is required.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the filesystem for which the quota tree is created.
+ - For creation of a quota tree either I(filesystem_id) or
+ I(filesystem_name) is required.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the filesystem is created.
+ - For creation of a quota tree either I(nas_server_name) or
+ I(nas_server_id) is required.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the filesystem is created.
+    - For creation of a quota tree either I(nas_server_id) or
+      I(nas_server_name) is required.
+ type: str
+ tree_quota_id:
+ description:
+ - The ID of the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ view/modify/delete quota tree.
+ type: str
+ path:
+ description:
+ - The path to the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/view/modify/delete a quota tree.
+ - Path must start with a forward slash '/'.
+ type: str
+ hard_limit:
+ description:
+ - Hard limitation for a quota tree on the total space available. If exceeded,
+ users in quota tree cannot write data.
+ - Value C(0) implies no limit.
+    - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be C(0)
+ during creation of a quota tree.
+ type: int
+ soft_limit:
+ description:
+ - Soft limitation for a quota tree on the total space available. If exceeded,
+ notification will be sent to users in the quota tree for the grace period mentioned, beyond
+ which users cannot use space.
+ - Value C(0) implies no limit.
+ - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation of quota tree.
+ type: int
+ cap_unit:
+ description:
+ - Unit of I(soft_limit) and I(hard_limit) size.
+ - It defaults to C(GB) if not specified.
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ description:
+ description:
+ - Description of a quota tree.
+ type: str
+ state:
+ description:
+ - The state option is used to mention the existence of the filesystem
+ quota tree.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get quota tree details by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "present"
+
+ - name: Get quota tree details by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ path: "/test"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Create quota tree for a filesystem with filesystem name
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ path: "/test_new"
+ state: "present"
+
+ - name: Modify quota tree limit usage by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ path: "/test_new"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete quota tree by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ tree_quota_id: "treequota_171798700679_10"
+ state: "absent"
+
+ - name: Delete quota tree by path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/test_new"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+get_tree_quota_details:
+ description: Details of the quota tree.
+ returned: When quota tree exists
+ type: dict
+ contains:
+ filesystem:
+ description: Filesystem details for which the quota
+ tree is created.
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: Filesystem details for which the
+ quota tree is created.
+ type: dict
+ contains:
+ id:
+ description: ID of the filesystem for
+                            which the quota tree is created.
+ type: str
+ description:
+ description: Description of the quota tree.
+ type: str
+ path:
+ description: Path to quota tree.
+ A valid path must start with a forward slash '/'.
+ It is mandatory while creating a quota tree.
+ type: str
+ hard_limit:
+ description: Hard limit of quota tree.
+ If the quota tree's space usage exceeds
+ the hard limit, users in quota tree cannot write data.
+ type: int
+ soft_limit:
+ description: Soft limit of the quota tree.
+ If the quota tree's space usage exceeds the soft limit,
+ the storage system starts to count down based
+ on the specified grace period.
+ type: int
+ id:
+ description: Quota tree ID.
+ type: str
+ size_used:
+ description: Size of used space in the filesystem by the user files.
+ type: int
+ gp_left:
+ description: The grace period left after the
+ soft limit for the user quota is exceeded.
+ type: int
+ state:
+ description: State of the quota tree.
+ type: int
+ sample: {
+ "description": "",
+ "existed": true,
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": 8788549469862,
+ "id": "fs_137",
+ "name": "test",
+ "nas_server": {
+ "id": "nas_1",
+ "name": "lglad072"
+ }
+ }
+ },
+ "gp_left": null,
+ "hard_limit": "6.0 TB",
+ "hash": 8788549497558,
+ "id": "treequota_171798694897_1",
+ "path": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "size_used": 0,
+ "soft_limit": "5.0 TB",
+ "state": 0
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('tree_quota')
+
+application_type = "Ansible/1.6.0"
+
+
+class QuotaTree(object):
+ """Class with Quota Tree operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_quota_tree_parameters())
+
+ mutually_exclusive = [['filesystem_name', 'filesystem_id'],
+ ['nas_server_name', 'nas_server_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive)
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def check_quota_tree_is_present(self, fs_id, path, tree_quota_id):
+ """
+ Check if quota tree is present in filesystem.
+ :param fs_id: ID of filesystem where quota tree is searched.
+ :param path: Path to the quota tree
+ :param tree_quota_id: ID of the quota tree
+ :return: ID of quota tree if it exists else None.
+ """
+ if tree_quota_id is None and path is None:
+ return None
+
+ all_tree_quota = self.unity_conn.get_tree_quota(filesystem=fs_id,
+ id=tree_quota_id,
+ path=path)
+
+ if tree_quota_id and len(all_tree_quota) == 0 \
+ and self.module.params['state'] == "present":
+ errormsg = "Tree quota %s does not exist." % tree_quota_id
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ if len(all_tree_quota) > 0:
+ msg = "Quota tree with id %s is present in filesystem %s" % (all_tree_quota[0].id,
+ fs_id)
+ LOG.info(msg)
+ return all_tree_quota[0].id
+ else:
+ return None
+
+ def create_quota_tree(self, fs_id, soft_limit, hard_limit, unit, path, description):
+ """
+ Create quota tree of a filesystem.
+ :param fs_id: ID of filesystem where quota tree is to be created.
+ :param soft_limit: Soft limit
+ :param hard_limit: Hard limit
+ :param unit: Unit of soft limit and hard limit
+ :param path: Path to quota tree
+ :param description: Description for quota tree
+ :return: Dict containing new quota tree details.
+ """
+
+ if soft_limit is None and hard_limit is None:
+ errormsg = "Both soft limit and hard limit cannot be empty. " \
+ "Please provide atleast one to create quota tree."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+ hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
+ try:
+ obj_tree_quota = self.unity_conn.create_tree_quota(filesystem_id=fs_id, hard_limit=hard_limit_in_bytes,
+ soft_limit=soft_limit_in_bytes, path=path,
+ description=description)
+ LOG.info("Successfully created quota tree")
+
+ if obj_tree_quota:
+ return obj_tree_quota
+ else:
+ return None
+
+ except Exception as e:
+ errormsg = "Create quota tree operation at path {0} failed in filesystem {1}" \
+ " with error {2}".format(path, fs_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
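create_quota_tree() above converts soft_limit/hard_limit plus cap_unit (defaulting to GB per the option docs) into bytes via utils.get_size_bytes, which is not shown in this diff. A standalone sketch under the assumption of binary (1024-based) units; the pass-through handling of None is also an assumption:

    UNIT_MULTIPLIERS = {"MB": 1024 ** 2, "GB": 1024 ** 3, "TB": 1024 ** 4}

    def get_size_bytes(size, cap_unit):
        """Return the size in bytes; a size of None passes through."""
        if size is None:
            return None
        return size * UNIT_MULTIPLIERS[cap_unit]

    assert get_size_bytes(5, "TB") == 5497558138880
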
+ def get_filesystem_tree_quota_display_attributes(self, tree_quota_id):
+ """Display quota tree attributes
+ :param tree_quota_id: Quota tree ID
+ :return: Quota tree dict to display
+ """
+ try:
+ tree_quota_obj = self.unity_conn.get_tree_quota(_id=tree_quota_id)
+ tree_quota_details = tree_quota_obj._get_properties()
+ if tree_quota_obj and tree_quota_obj.existed:
+ tree_quota_details['soft_limit'] = utils. \
+ convert_size_with_unit(int(tree_quota_details['soft_limit']))
+ tree_quota_details['hard_limit'] = utils. \
+ convert_size_with_unit(int(tree_quota_details['hard_limit']))
+
+ tree_quota_details['filesystem']['UnityFileSystem']['name'] = \
+ tree_quota_obj.filesystem.name
+ tree_quota_details['filesystem']['UnityFileSystem'].update(
+ {'nas_server': {'name': tree_quota_obj.filesystem.nas_server.name,
+ 'id': tree_quota_obj.filesystem.nas_server.id}})
+ return tree_quota_details
+
+ except Exception as e:
+ errormsg = "Failed to display quota tree details {0} with " \
+ "error {1}".format(tree_quota_obj.id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_filesystem(self, nas_server=None, name=None, id=None):
+ """
+ Get filesystem details.
+ :param nas_server: Nas server object.
+ :param name: Name of filesystem.
+ :param id: ID of filesystem.
+ :return: Dict containing filesystem details if it exists.
+ """
+ id_or_name = id if id else name
+ try:
+ obj_fs = None
+ if name:
+ if not nas_server:
+ err_msg = "NAS Server is required to get the FileSystem."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ obj_fs = self.unity_conn.get_filesystem(name=name,
+ nas_server=nas_server)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ if id:
+ if nas_server:
+ obj_fs = self.unity_conn \
+ .get_filesystem(id=id, nas_server=nas_server)
+ else:
+ obj_fs = self.unity_conn.get_filesystem(id=id)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ except Exception as e:
+ error_msg = "Failed to get filesystem %s with error %s." \
+ % (id_or_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server_obj(self, name=None, id=None):
+ """
+ Get nas server details.
+ :param name: Nas server name.
+ :param id: Nas server ID.
+ :return: Dict containing nas server details if it exists.
+ """
+ nas_server = id if id else name
+ error_msg = ("Failed to get NAS server %s." % nas_server)
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+ if name and obj_nas.existed:
+ LOG.info("Successfully got the NAS server object %s.",
+ obj_nas)
+ return obj_nas
+ elif id and obj_nas.existed:
+ LOG.info("Successfully got the NAS server object %s.",
+ obj_nas)
+ return obj_nas
+ else:
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+ except Exception as e:
+ error_msg = "Failed to get NAS server %s with error %s." \
+ % (nas_server, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def modify_tree_quota(self, tree_quota_id, soft_limit, hard_limit, unit, description):
+ """
+ Modify quota tree of filesystem.
+ :param tree_quota_id: ID of the quota tree
+ :param soft_limit: Soft limit
+ :param hard_limit: Hard limit
+ :param unit: Unit of soft limit and hard limit
+ :param description: Description of quota tree
+ :return: Boolean value whether modify quota tree operation is successful.
+ """
+ try:
+ if soft_limit is None and hard_limit is None:
+ return False
+ tree_quota_obj = self.unity_conn.get_tree_quota(tree_quota_id)._get_properties()
+ if soft_limit is None:
+ soft_limit_in_bytes = tree_quota_obj['soft_limit']
+ else:
+ soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+ if hard_limit is None:
+ hard_limit_in_bytes = tree_quota_obj['hard_limit']
+ else:
+ hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
+
+ if description is None:
+ description = tree_quota_obj['description']
+
+ if tree_quota_obj:
+ if tree_quota_obj['soft_limit'] == soft_limit_in_bytes and \
+ tree_quota_obj['hard_limit'] == hard_limit_in_bytes and \
+ tree_quota_obj['description'] == description:
+ return False
+ else:
+ modify_tree_quota = self.unity_conn.modify_tree_quota(tree_quota_id=tree_quota_id,
+ hard_limit=hard_limit_in_bytes,
+ soft_limit=soft_limit_in_bytes,
+ description=description)
+ LOG.info("Successfully modified quota tree")
+ if modify_tree_quota:
+ return True
+ except Exception as e:
+ errormsg = "Modify quota tree operation {0} failed" \
+ " with error {1}".format(tree_quota_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
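modify_tree_quota() above is idempotent: unspecified limits are filled in from the current object, and the API is called only when something actually differs. The decision logic in isolation (needs_modify is an illustrative name):

    def needs_modify(current, desired):
        """Desired values of None mean 'keep the current setting'."""
        return any(
            value is not None and current.get(key) != value
            for key, value in desired.items()
        )

    current = {"soft_limit": 5 * 1024 ** 4, "hard_limit": 6 * 1024 ** 4}
    assert not needs_modify(current, {"soft_limit": None})   # no-op
    assert needs_modify(current, {"hard_limit": 10 * 1024 ** 4})
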
+ def delete_tree_quota(self, tree_quota_id):
+ """
+ Delete quota tree of a filesystem.
+ :param tree_quota_id: ID of quota tree
+ :return: Boolean whether quota tree is deleted
+ """
+
+ try:
+ delete_tree_quota_obj = self.unity_conn.delete_tree_quota(tree_quota_id=tree_quota_id)
+
+ if delete_tree_quota_obj:
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of quota tree id:{0} " \
+ "failed with error {1}".format(tree_quota_id,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on quota tree module based on parameters
+ passed in the playbook
+ """
+ filesystem_id = self.module.params['filesystem_id']
+ filesystem_name = self.module.params['filesystem_name']
+ nas_server_name = self.module.params['nas_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ cap_unit = self.module.params['cap_unit']
+ state = self.module.params['state']
+ hard_limit = self.module.params['hard_limit']
+ soft_limit = self.module.params['soft_limit']
+ path = self.module.params['path']
+ description = self.module.params['description']
+ tree_quota_id = self.module.params['tree_quota_id']
+ create_tree_quota_obj = None
+ nas_server_resource = None
+ fs_id = None
+
+ '''
+ result is a dictionary to contain end state and quota tree details
+ '''
+ result = dict(
+ changed=False,
+ create_tree_quota=False,
+ modify_tree_quota=False,
+ get_tree_quota_details={},
+ delete_tree_quota=False
+
+ )
+
+ if (soft_limit or hard_limit) and cap_unit is None:
+ cap_unit = 'GB'
+
+ if soft_limit and utils.is_size_negative(soft_limit):
+ error_message = "Invalid soft_limit provided, " \
+ "must be greater than or equal to 0"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if hard_limit and utils.is_size_negative(hard_limit):
+ error_message = "Invalid hard_limit provided, " \
+ "must be greater than or equal to 0"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ '''
+ Get NAS server Object
+ '''
+
+ if nas_server_name is not None:
+ if utils.is_input_empty(nas_server_name):
+ self.module.fail_json(msg="Invalid nas_server_name given,"
+ " Please provide a valid name.")
+ nas_server_resource = self \
+ .get_nas_server_obj(name=nas_server_name)
+ elif nas_server_id is not None:
+ if utils.is_input_empty(nas_server_id):
+ self.module.fail_json(msg="Invalid nas_server_id given,"
+ " Please provide a valid ID.")
+ nas_server_resource = self.get_nas_server_obj(id=nas_server_id)
+
+ '''
+ Get filesystem Object
+ '''
+ if filesystem_name is not None:
+ if utils.is_input_empty(filesystem_name):
+ self.module.fail_json(msg="Invalid filesystem_name given,"
+ " Please provide a valid name.")
+ filesystem_obj = self \
+ .get_filesystem(nas_server=nas_server_resource,
+ name=filesystem_name)
+ fs_id = filesystem_obj.id
+ elif filesystem_id is not None:
+ if utils.is_input_empty(filesystem_id):
+ self.module.fail_json(msg="Invalid filesystem_id given,"
+ " Please provide a valid ID.")
+ filesystem_obj = self \
+ .get_filesystem(id=filesystem_id)
+ if filesystem_obj:
+ fs_id = filesystem_obj[0].id
+ else:
+ self.module.fail_json(msg="Filesystem does not exist.")
+
+ '''
+ Validate path to quota tree
+ '''
+ if path is not None:
+ if utils.is_input_empty(path):
+ self.module.fail_json(msg=" Please provide a valid path.")
+ elif not path.startswith('/'):
+ self.module.fail_json(msg="The path is relative to the root of the file system "
+ "and must start with a forward slash '/'.")
+
+ if filesystem_id is None and filesystem_name is None:
+ self.module.fail_json(msg="Please provide either filesystem_name or fileystem_id.")
+
+ quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id)
+ tree_quota_id = quota_tree_id_present
+
+ '''
+ Create quota tree
+ '''
+
+ if (filesystem_id or filesystem_name) and path is not None and state == "present":
+ if not tree_quota_id:
+ LOG.info("Creating quota tree")
+ create_tree_quota_obj = self.create_quota_tree(fs_id, soft_limit, hard_limit,
+ cap_unit, path, description)
+
+ if create_tree_quota_obj:
+ tree_quota_id = create_tree_quota_obj.id
+ result['create_tree_quota'] = True
+
+ '''
+ Modify quota tree
+ '''
+
+ if tree_quota_id and state == "present":
+ LOG.info("Modifying quota tree")
+ result['modify_tree_quota'] = self.modify_tree_quota(tree_quota_id, soft_limit, hard_limit, cap_unit,
+ description)
+
+ '''
+ Delete quota tree
+ '''
+
+ if tree_quota_id is not None and state == "absent":
+ LOG.info("Deleting quota tree")
+ result['delete_tree_quota'] = self.delete_tree_quota(tree_quota_id)
+
+ '''
+ Get quota tree details
+ '''
+ if state == "present" and tree_quota_id is not None:
+ result['get_tree_quota_details'] = self.get_filesystem_tree_quota_display_attributes(tree_quota_id)
+ else:
+ result['get_tree_quota_details'] = {}
+
+ if result['create_tree_quota'] or result['modify_tree_quota'] or result['delete_tree_quota']:
+ result['changed'] = True
+
+ self.module.exit_json(**result)
+
+
+def get_quota_tree_parameters():
+ """This method provide parameters required for the ansible
+ quota tree module on Unity"""
+ return dict(
+ filesystem_id=dict(required=False, type='str'),
+ filesystem_name=dict(required=False, type='str'),
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ hard_limit=dict(required=False, type='int'),
+ soft_limit=dict(required=False, type='int'),
+ cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
+ tree_quota_id=dict(required=False, type='str'),
+ nas_server_name=dict(required=False, type='str'),
+ nas_server_id=dict(required=False, type='str'),
+ path=dict(required=False, type='str', no_log=True),
+ description=dict(required=False, type='str')
+ )
+
+
+def main():
+ """ Create Unity quota tree object and perform action on it
+ based on user input from playbook"""
+ obj = QuotaTree()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/user_quota.py b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
new file mode 100644
index 000000000..d9116c3a5
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
@@ -0,0 +1,1012 @@
+#!/usr/bin/python
+# Copyright: (c) 2021, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing User Quota on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: user_quota
+short_description: Manage user quota on the Unity storage system
+description:
+- Managing User Quota on the Unity storage system includes
+ Create user quota,
+ Get user quota,
+ Modify user quota,
+ Delete user quota,
+ Create user quota for quota tree,
+ Modify user quota for quota tree and
+ Delete user quota for quota tree.
+version_added: '1.2.0'
+extends_documentation_fragment:
+ - dellemc.unity.unity
+author:
+- Spandita Panigrahi (@panigs7) <ansible.team@dell.com>
+options:
+ filesystem_name:
+ description:
+ - The name of the filesystem for which the user quota is created.
+ - For creation of a user quota either I(filesystem_name) or
+ I(filesystem_id) is required.
+ type: str
+ filesystem_id:
+ description:
+ - The ID of the filesystem for which the user quota is created.
+ - For creation of a user quota either I(filesystem_id) or
+ I(filesystem_name) is required.
+ type: str
+ nas_server_name:
+ description:
+ - The name of the NAS server in which the filesystem is created.
+ - For creation of a user quota either I(nas_server_name) or
+ I(nas_server_id) is required.
+ type: str
+ nas_server_id:
+ description:
+ - The ID of the NAS server in which the filesystem is created.
+    - For creation of a user quota either I(nas_server_id) or
+      I(nas_server_name) is required.
+ type: str
+ hard_limit:
+ description:
+ - Hard limitation for a user on the total space available. If exceeded, user cannot write data.
+ - Value C(0) implies no limit.
+ - One of the values of I(soft_limit) and I(hard_limit) can be C(0), however, both cannot be C(0)
+ during creation or modification of user quota.
+ type: int
+ soft_limit:
+ description:
+ - Soft limitation for a user on the total space available. If exceeded,
+ notification will be sent to the user for the grace period mentioned, beyond
+ which the user cannot use space.
+ - Value C(0) implies no limit.
+ - Both I(soft_limit) and I(hard_limit) cannot be C(0) during creation or modification
+ of user quota.
+ type: int
+ cap_unit:
+ description:
+ - Unit of I(soft_limit) and I(hard_limit) size.
+ - It defaults to C(GB) if not specified.
+ choices: ['MB', 'GB', 'TB']
+ type: str
+ user_type:
+ description:
+ - Type of user creating a user quota.
+ - Mandatory while creating or modifying user quota.
+ choices: ['Unix', 'Windows']
+ type: str
+ win_domain:
+ description:
+ - Fully qualified or short domain name for Windows user type.
+ - Mandatory when I(user_type) is C(Windows).
+ type: str
+ user_name:
+ description:
+ - User name of the user quota when I(user_type) is C(Windows) or C(Unix).
+ - Option I(user_name) must be specified along with I(win_domain) when I(user_type) is C(Windows).
+ type: str
+ uid:
+ description:
+ - User ID of the user quota.
+ type: str
+ user_quota_id:
+ description:
+ - User quota ID generated after creation of a user quota.
+ type: str
+ tree_quota_id:
+ description:
+ - The ID of the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/modify/delete user quota for a quota tree.
+ type: str
+ path:
+ description:
+ - The path to the quota tree.
+ - Either I(tree_quota_id) or I(path) to quota tree is required to
+ create/modify/delete user quota for a quota tree.
+ - Path must start with a forward slash '/'.
+ type: str
+ state:
+ description:
+ - The I(state) option is used to mention the existence of the user quota.
+ type: str
+ required: true
+ choices: ['absent', 'present']
+
+notes:
+ - The I(check_mode) is not supported.
+'''
+
+EXAMPLES = r'''
+ - name: Get user quota details by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ state: "present"
+
+ - name: Get user quota details by user quota uid/user name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "fs_2171"
+ nas_server_id: "nas_21"
+ user_name: "test"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Create user quota for a filesystem with filesystem name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_name: "Test_filesystem"
+ nas_server_name: "lglad068"
+ hard_limit: 6
+ cap_unit: "TB"
+ soft_limit: 5
+ uid: "111"
+ state: "present"
+
+ - name: Modify user quota limit usage by user quota id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ user_quota_id: "userquota_171798700679_0_123"
+ hard_limit: 10
+ cap_unit: "TB"
+ soft_limit: 8
+ state: "present"
+
+ - name: Modify user quota by filesystem id and user quota uid/user_name
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ soft_limit: 10
+ state: "present"
+
+ - name: Delete user quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Create user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 9
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Create user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Unix"
+ user_name: "test"
+ hard_limit: 2
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ soft_limit: 10
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Modify user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ user_type: "Windows"
+ win_domain: "prod"
+ user_name: "sample"
+ hard_limit: 12
+ cap_unit: "TB"
+ state: "present"
+
+ - name: Delete user quota of a quota tree by quota tree path
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ filesystem_id: "fs_2171"
+ path: "/sample"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+
+ - name: Delete user quota of a quota tree by quota tree id
+ dellemc.unity.user_quota:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ tree_quota_id: "treequota_171798700679_4"
+ win_domain: "prod"
+ user_name: "sample"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+get_user_quota_details:
+ description: Details of the user quota.
+ returned: When user quota exists
+ type: dict
+ contains:
+ filesystem:
+ description: Filesystem details for which the user quota is
+ created.
+ type: dict
+ contains:
+ UnityFileSystem:
+ description: Filesystem details for which the
+ user quota is created.
+ type: dict
+ contains:
+ id:
+ description: ID of the filesystem for
+ which the user quota is created.
+ type: str
+ name:
+ description: Name of filesystem.
+ type: str
+ nas_server:
+ description: NAS server details where the
+ filesystem is created.
+ type: dict
+ contains:
+ name:
+ description: Name of the NAS server.
+ type: str
+ id:
+ description: ID of the NAS server.
+ type: str
+ tree_quota:
+ description: Quota tree details for which the user quota is
+ created.
+ type: dict
+ contains:
+ UnityTreeQuota:
+ description: Quota tree details for which the user
+ quota is created.
+ type: dict
+ contains:
+ id:
+ description: ID of the quota tree.
+ type: str
+ path:
+ description: Path to quota tree.
+ type: str
+ gp_left:
+ description: The grace period left after the soft limit
+ for the user quota is exceeded.
+ type: int
+ hard_limit:
+ description: Hard limitation for a user on the total space
+ available. If exceeded, user cannot write data.
+ type: int
+ hard_ratio:
+ description: The hard ratio is the ratio between the
+ hard limit size of the user quota
+ and the amount of storage actually consumed.
+ type: str
+ soft_limit:
+ description: Soft limitation for a user on the total space
+ available. If exceeded, notification will be
+ sent to user for the grace period mentioned, beyond
+ which user cannot use space.
+ type: int
+ soft_ratio:
+ description: The soft ratio is the ratio between
+ the soft limit size of the user quota
+ and the amount of storage actually consumed.
+ type: str
+ id:
+ description: User quota ID.
+ type: str
+ size_used:
+ description: Size of used space in the filesystem
+ by the user files.
+ type: int
+ state:
+ description: State of the user quota.
+ type: int
+ uid:
+ description: User ID of the user.
+ type: int
+ unix_name:
+ description: Unix user name for this user quota's uid.
+ type: str
+ windows_names:
+ description: Windows user name that maps to this quota's uid.
+ type: str
+ windows_sids:
+ description: Windows SIDs that map to this quota's uid.
+ type: str
+ sample: {
+ "existed": true,
+ "filesystem": {
+ "UnityFileSystem": {
+ "hash": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "id": "fs_120",
+ "name": "nfs-multiprotocol",
+ "nas_server": {
+ "id": "nas_1",
+ "name": "lglad072"
+ }
+ }
+ },
+ "gp_left": null,
+ "hard_limit": "10.0 GB",
+ "hard_ratio": null,
+ "hash": 8752448438089,
+ "id": "userquota_171798694698_0_60000",
+ "size_used": 0,
+ "soft_limit": "10.0 GB",
+ "soft_ratio": null,
+ "state": 0,
+ "tree_quota": null,
+ "uid": 60000,
+ "unix_name": null,
+ "windows_names": null,
+ "windows_sids": null
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('user_quota')
+
+application_type = "Ansible/1.6.0"
+
+
+class UserQuota(object):
+ """Class with User Quota operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_user_quota_parameters())
+
+ mutually_exclusive = [['user_name', 'uid'], ['uid', 'win_domain'],
+ ['filesystem_name', 'filesystem_id'],
+ ['nas_server_name', 'nas_server_id'],
+ ['user_name', 'user_quota_id'],
+ ['uid', 'user_quota_id']]
+
+ required_if = [('user_type', 'Windows', ['win_domain', 'user_name'], False),
+ ('user_type', 'Unix', ['user_name'], False)]
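+        # The trailing False in each required_if tuple means all listed
+        # parameters are required when user_type matches; True would require
+        # only one of them.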
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if)
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def check_user_is_present(self, fs_id, uid, unix, win_name, user_quota_id):
+ """
+ Check if user quota is present in filesystem.
+ :param fs_id: ID of filesystem where user quota is searched.
+ :param uid: UID of the user quota
+ :param unix: Unix user name of user quota
+ :param win_name: Windows user name of user quota
+ :param user_quota_id: ID of the user quota
+ :return: ID of user quota if it exists else None.
+ """
+
+ if not self.check_user_type_provided(win_name, uid, unix):
+ return None
+
+ user_name_or_uid_or_id = unix if unix else win_name if win_name else uid if \
+ uid else user_quota_id
+
+ # All user quotas in the given filesystem
+ all_user_quota = self.unity_conn.get_user_quota(filesystem=fs_id, id=user_quota_id,
+ unix_name=unix, windows_names=win_name,
+ uid=uid)
+
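+        # A quota entry tied to a tree_quota belongs to that quota tree, so only
+        # entries without a tree_quota count as filesystem-level user quotas here.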
+        for user_quota in all_user_quota:
+            if user_quota.tree_quota is None:
+                msg = "User quota %s with id %s " \
+                      "is present in filesystem %s" \
+                      % (user_name_or_uid_or_id, user_quota.id, fs_id)
+                LOG.info(msg)
+                return user_quota.id
+
+ return None
+
+ def check_quota_tree_is_present(self, fs_id, path, tree_quota_id):
+ """
+ Check if quota tree is present in filesystem.
+ :param fs_id: ID of filesystem where quota tree is searched.
+ :param path: Path to quota tree
+ :param tree_quota_id: ID of the quota tree
+ :return: ID of quota tree if it exists.
+ """
+
+ path_or_id = path if path else tree_quota_id
+ tree_quota_obj = self.unity_conn.get_tree_quota(filesystem=fs_id, path=path,
+ id=tree_quota_id)
+ if len(tree_quota_obj) > 0:
+ msg = "Tree quota id %s present in filesystem %s" % (tree_quota_obj[0].id, fs_id)
+ LOG.info(msg)
+ return tree_quota_obj[0].id
+ else:
+ errormsg = "The quota tree '%s' does not exist" % path_or_id
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def check_user_quota_in_quota_tree(self, tree_quota_id, uid, unix, win_name, user_quota_id):
+ """
+ Check if user quota is present in quota tree.
+ :param tree_quota_id: ID of quota tree where user quota is searched.
+ :param uid: UID of user quota
+ :param unix: Unix name of user quota
+ :param win_name: Windows name of user quota
+ :param user_quota_id: ID of the user quota
+ :return: ID of user quota if it exists in quota tree else None.
+ """
+ if not self.check_user_type_provided(win_name, uid, unix):
+ return None
+
+ user_quota_name = uid if uid else unix if unix else win_name \
+ if win_name else user_quota_id
+ user_quota_obj = self.unity_conn.get_user_quota(tree_quota=tree_quota_id,
+ uid=uid, windows_names=win_name,
+ unix_name=unix,
+ id=user_quota_id)
+ if len(user_quota_obj) > 0:
+ msg = "User quota %s is present in quota tree %s " % (user_quota_name, tree_quota_id)
+ LOG.info(msg)
+ return user_quota_obj[0].id
+ else:
+ return None
+
+ def create_user_quota(self, fs_id, soft_limit, hard_limit, unit, uid, unix, win_name, tree_quota_id):
+ """
+ Create user quota of a filesystem.
+ :param fs_id: ID of filesystem where user quota is to be created.
+ :param soft_limit: Soft limit
+ :param hard_limit: Hard limit
+ :param unit: Unit of soft limit and hard limit
+ :param uid: UID of the user quota
+ :param unix: Unix user name of user quota
+ :param win_name: Windows user name of user quota
+ :param tree_quota_id: ID of tree quota
+ :return: Object containing new user quota details.
+ """
+
+ unix_or_uid_or_win = uid if uid else unix if unix else win_name
+ fs_id_or_tree_quota_id = fs_id if fs_id else tree_quota_id
+ if soft_limit is None and hard_limit is None:
+ errormsg = "Both soft limit and hard limit cannot be empty. " \
+ "Please provide atleast one to create user quota."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+ hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
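+        # A worked example, assuming utils.get_size_bytes uses binary
+        # (1024-based) units: get_size_bytes(5, 'TB') would yield
+        # 5 * 1024**4 = 5497558138880 bytes.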
+ try:
+ if self.check_user_type_provided(win_name, uid, unix):
+ obj_user_quota = self.unity_conn.create_user_quota(filesystem_id=fs_id,
+ hard_limit=hard_limit_in_bytes,
+ soft_limit=soft_limit_in_bytes,
+ uid=uid, unix_name=unix,
+ win_name=win_name,
+ tree_quota_id=tree_quota_id)
+ LOG.info("Successfully created user quota")
+ return obj_user_quota
+
+ except Exception as e:
+ errormsg = "Create quota for user {0} on {1} , failed with error {2} "\
+ .format(unix_or_uid_or_win, fs_id_or_tree_quota_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_filesystem_user_quota_display_attributes(self, user_quota_id):
+ """Get display user quota attributes
+ :param user_quota_id: User quota ID
+ :return: User quota dict to display
+ """
+ try:
+ user_quota_obj = self.unity_conn.get_user_quota(user_quota_id)
+ user_quota_details = user_quota_obj._get_properties()
+
+ if user_quota_obj and user_quota_obj.existed:
+ user_quota_details['soft_limit'] = utils. \
+ convert_size_with_unit(int(user_quota_details['soft_limit']))
+ user_quota_details['hard_limit'] = utils. \
+ convert_size_with_unit(int(user_quota_details['hard_limit']))
+
+ user_quota_details['filesystem']['UnityFileSystem']['name'] = \
+ user_quota_obj.filesystem.name
+ user_quota_details['filesystem']['UnityFileSystem'].update(
+ {'nas_server': {'name': user_quota_obj.filesystem.nas_server.name,
+ 'id': user_quota_obj.filesystem.nas_server.id}})
+
+ if user_quota_obj.tree_quota:
+ user_quota_details['tree_quota']['UnityTreeQuota']['path'] = \
+ user_quota_obj.tree_quota.path
+
+ return user_quota_details
+ else:
+ errormsg = "User quota does not exist."
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ except Exception as e:
+ errormsg = "Failed to display the details of user quota {0} with " \
+ "error {1}".format(user_quota_obj.id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_filesystem(self, nas_server=None, name=None, id=None):
+ """
+ Get filesystem details.
+ :param nas_server: Nas server object.
+ :param name: Name of filesystem.
+ :param id: ID of filesystem.
+ :return: Object containing filesystem details if it exists.
+ """
+ id_or_name = id if id else name
+ try:
+ obj_fs = None
+ if name:
+ if not nas_server:
+ err_msg = "NAS Server is required to get the FileSystem."
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+ obj_fs = self.unity_conn.get_filesystem(name=name,
+ nas_server=nas_server)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ if id:
+ if nas_server:
+ obj_fs = self.unity_conn \
+ .get_filesystem(id=id, nas_server=nas_server)
+ else:
+ obj_fs = self.unity_conn.get_filesystem(id=id)
+ if obj_fs and obj_fs.existed:
+ LOG.info("Successfully got the filesystem object %s.",
+ obj_fs)
+ return obj_fs
+ except Exception as e:
+ error_msg = "Failed to get filesystem %s with error %s." \
+ % (id_or_name, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_nas_server_obj(self, name=None, id=None):
+ """
+ Get nas server details.
+ :param name: Nas server name.
+ :param id: Nas server ID.
+ :return: Object containing nas server details if it exists.
+ """
+ nas_server = id if id else name
+ error_msg = ("Failed to get NAS server %s." % nas_server)
+ try:
+ obj_nas = self.unity_conn.get_nas_server(_id=id, name=name)
+            if obj_nas.existed:
+                LOG.info("Successfully got the NAS server object %s.",
+                         obj_nas)
+                return obj_nas
+ else:
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+ except Exception as e:
+ error_msg = "Failed to get NAS server %s with error %s." \
+ % (nas_server, str(e))
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def modify_user_quota(self, user_quota_id, soft_limit, hard_limit, unit):
+ """
+ Modify user quota of filesystem by its uid/username/user quota id.
+ :param user_quota_id: ID of the user quota
+ :param soft_limit: Soft limit
+ :param hard_limit: Hard limit
+ :param unit: Unit of soft limit and hard limit
+ :return: Boolean value whether modify user quota operation is successful.
+ """
+
+ if soft_limit is None and hard_limit is None:
+ return False
+
+ user_quota_obj = self.unity_conn.get_user_quota(user_quota_id)._get_properties()
+
+ if soft_limit is None:
+ soft_limit_in_bytes = user_quota_obj['soft_limit']
+ else:
+ soft_limit_in_bytes = utils.get_size_bytes(soft_limit, unit)
+
+ if hard_limit is None:
+ hard_limit_in_bytes = user_quota_obj['hard_limit']
+ else:
+ hard_limit_in_bytes = utils.get_size_bytes(hard_limit, unit)
+
+ if user_quota_obj:
+ if user_quota_obj['soft_limit'] == soft_limit_in_bytes and \
+ user_quota_obj['hard_limit'] == hard_limit_in_bytes:
+ return False
+ else:
+ error_msg = "The user quota does not exist."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ try:
+ obj_user_quota = self.unity_conn.modify_user_quota(user_quota_id=user_quota_id,
+ hard_limit=hard_limit_in_bytes,
+ soft_limit=soft_limit_in_bytes)
+ LOG.info("Successfully modified user quota")
+ if obj_user_quota:
+ return True
+ except Exception as e:
+ errormsg = "Modify user quota {0} failed" \
+ " with error {1}".format(user_quota_id, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def check_user_type_provided(self, win_name, uid, unix_name):
+ """Checks if user type or uid is provided
+ :param win_name: Windows name of user quota
+ :param uid: UID of user quota
+        :param unix_name: Unix name of user quota
+        :return: True if any of the identifiers is provided, else False"""
+        return win_name is not None or uid is not None or unix_name is not None
+
+ def perform_module_operation(self):
+ """
+ Perform different actions on user quota module based on parameters
+ passed in the playbook
+ """
+ filesystem_id = self.module.params['filesystem_id']
+ filesystem_name = self.module.params['filesystem_name']
+ nas_server_name = self.module.params['nas_server_name']
+ nas_server_id = self.module.params['nas_server_id']
+ cap_unit = self.module.params['cap_unit']
+ state = self.module.params['state']
+ user_quota_id = self.module.params['user_quota_id']
+ hard_limit = self.module.params['hard_limit']
+ soft_limit = self.module.params['soft_limit']
+ user_type = self.module.params['user_type']
+ uid = self.module.params['uid']
+ user_name = self.module.params['user_name']
+ win_domain = self.module.params['win_domain']
+ tree_quota_id = self.module.params['tree_quota_id']
+ path = self.module.params['path']
+ create_user_quota_obj = None
+ win_name = None
+ unix_name = None
+ nas_server_resource = None
+ fs_id = None
+ user_quota_details = ''
+ filesystem_obj = None
+
+ '''
+ result is a dictionary to contain end state and user quota details
+ '''
+ result = dict(
+ changed=False,
+ create_user_quota=False,
+ modify_user_quota=False,
+ get_user_quota_details={},
+ delete_user_quota=False
+ )
+
+ if (soft_limit or hard_limit) and cap_unit is None:
+ cap_unit = 'GB'
+
+ if soft_limit == 0 and hard_limit == 0:
+ error_message = 'Both soft limit and hard limit cannot be unlimited'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if soft_limit and utils.is_size_negative(soft_limit):
+ error_message = "Invalid soft_limit provided, " \
+ "must be greater than 0"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if hard_limit and utils.is_size_negative(hard_limit):
+ error_message = "Invalid hard_limit provided, " \
+ "must be greater than 0"
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if (user_type or uid) and filesystem_id is None and \
+ filesystem_name is None and tree_quota_id is None:
+ error_message = 'Please provide either ' \
+ 'filesystem_name or filesystem_id'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ if (nas_server_name or nas_server_id) \
+ and (filesystem_id is None and filesystem_name is None):
+ error_message = 'Please provide either ' \
+ 'filesystem_name or filesystem_id'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ '''
+ Validate path to quota tree
+ '''
+ if path is not None:
+ if utils.is_input_empty(path):
+ self.module.fail_json(msg=" Please provide a valid path.")
+ elif not path.startswith('/'):
+ self.module.fail_json(msg="The path is relative to the root of the file system "
+ "and must start with a forward slash.")
+
+ if filesystem_id is None and filesystem_name is None:
+ self.module.fail_json(msg="Please provide either filesystem_name or fileystem_id.")
+
+ if user_type and filesystem_id is None and filesystem_name is None and tree_quota_id is None:
+ error_message = 'Please provide either ' \
+ 'filesystem_name or filesystem_id to create user quota for a ' \
+ 'filesystem, or provide tree_quota_id to create user quota for a quota tree.'
+ LOG.error(error_message)
+ self.module.fail_json(msg=error_message)
+
+ '''
+ Get NAS server Object
+ '''
+
+ if nas_server_name is not None:
+ if utils.is_input_empty(nas_server_name):
+ self.module.fail_json(msg="Invalid nas_server_name given,"
+ " Please provide a valid name.")
+ nas_server_resource = self \
+ .get_nas_server_obj(name=nas_server_name)
+ elif nas_server_id is not None:
+ if utils.is_input_empty(nas_server_id):
+ self.module.fail_json(msg="Invalid nas_server_id given,"
+ " Please provide a valid ID.")
+ nas_server_resource = self.get_nas_server_obj(id=nas_server_id)
+
+ '''
+ Get filesystem Object
+ '''
+ if filesystem_name is not None:
+ if utils.is_input_empty(filesystem_name):
+ self.module.fail_json(msg="Invalid filesystem_name given,"
+ " Please provide a valid name.")
+ filesystem_obj = self \
+ .get_filesystem(nas_server=nas_server_resource,
+ name=filesystem_name)
+ fs_id = filesystem_obj.id
+ elif filesystem_id is not None:
+ if utils.is_input_empty(filesystem_id):
+ self.module.fail_json(msg="Invalid filesystem_id given,"
+ " Please provide a valid ID.")
+ filesystem_obj = self \
+ .get_filesystem(id=filesystem_id)
+ if filesystem_obj:
+ filesystem_obj = filesystem_obj[0]
+ fs_id = filesystem_obj.id
+ else:
+ self.module.fail_json(msg="Filesystem does not exist.")
+
+ if (user_name or win_domain) and (soft_limit or hard_limit) \
+ and user_type is None:
+ self.module.fail_json(msg="Invalid user_type given,"
+ " Please provide a valid user_type.")
+
+ # Check the sharing protocol supported by the filesystem
+ # while creating a user quota
+ if filesystem_obj and (soft_limit is not None or hard_limit is not None):
+ supported_protocol = filesystem_obj.supported_protocols
+
+ if supported_protocol == utils.FSSupportedProtocolEnum["CIFS"] \
+ and (user_type == "Unix" or uid):
+ self.module.fail_json(msg="This filesystem supports only SMB protocol "
+ "and applicable only for windows users. "
+ "Please provide valid windows details.")
+ elif supported_protocol == utils.FSSupportedProtocolEnum["NFS"] \
+ and user_type == "Windows":
+ self.module.fail_json(msg="This filesystem supports only NFS protocol "
+ "and applicable only for unix users. "
+ "Please provide valid uid or unix details.")
+
+ '''
+ Validate user type or uid
+ '''
+ if uid and (utils.is_input_empty(uid) or not uid.isnumeric()):
+ self.module.fail_json(msg=" UID is empty. Please provide valid UID.")
+ if user_type:
+ if user_type == "Unix":
+ if user_name is None or utils.is_input_empty(user_name):
+ self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.")
+
+ if user_type == "Windows":
+ if win_domain is None or utils.is_input_empty(win_domain):
+ self.module.fail_json(msg=" 'win_domain' is empty. Please provide valid win_domain.")
+ elif user_name is None or utils.is_input_empty(user_name):
+ self.module.fail_json(msg=" 'user_name' is empty. Please provide valid user_name.")
+
+ if user_type != "Unix" and win_domain:
+ win_domain = win_domain.replace(".com", "")
+ win_name = win_domain + '\\' + user_name
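+            # Example with hypothetical values: win_domain='prod.com' and
+            # user_name='sample' produce win_name 'prod\sample'; the trailing
+            # '.com' is stripped so Unity receives the short domain form.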
+
+ if win_name is None and user_name:
+ unix_name = user_name
+
+ '''
+ Check if quota tree is already present in the filesystem
+ '''
+ if tree_quota_id or path:
+ quota_tree_id_present = self.check_quota_tree_is_present(fs_id, path, tree_quota_id)
+ tree_quota_id = quota_tree_id_present
+
+ '''
+ Check if the user quota is already present in the filesystem/ quota tree
+ '''
+ if tree_quota_id:
+ user_id_present = self.check_user_quota_in_quota_tree(tree_quota_id, uid, unix_name, win_name,
+ user_quota_id)
+ fs_id = None if tree_quota_id is not None else fs_id
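+            # Once a quota tree is in scope, fs_id is cleared so the subsequent
+            # create call attaches the user quota to the tree instead of the
+            # filesystem.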
+ else:
+ user_id_present = self.check_user_is_present(fs_id, uid, unix_name, win_name, user_quota_id)
+
+ if user_id_present:
+ user_quota_id = user_id_present
+
+ if state == "present":
+ if user_quota_id:
+ # Modify user quota. If no change modify_user_quota is false.
+ result['modify_user_quota'] = self.modify_user_quota(user_quota_id, soft_limit,
+ hard_limit, cap_unit)
+
+ else:
+ LOG.info("Creating user quota")
+ create_user_quota_obj = self.create_user_quota(fs_id, soft_limit, hard_limit,
+ cap_unit, uid, unix_name, win_name,
+ tree_quota_id)
+ if create_user_quota_obj:
+ user_quota_id = create_user_quota_obj.id
+ result['create_user_quota'] = True
+ else:
+ user_quota_id = None
+ '''
+ Deleting user quota.
+ When both soft limit and hard limit are set to 0, the user quota becomes
+ unlimited, and Unity therefore removes the user quota ID.
+ '''
+
+ if state == "absent" and user_quota_id:
+ soft_limit = 0
+ hard_limit = 0
+ err_msg = "Deleting user quota %s" % user_quota_id
+ LOG.info(err_msg)
+ result['delete_user_quota'] = self.modify_user_quota(user_quota_id,
+ soft_limit, hard_limit, cap_unit)
+ '''
+ Get user details
+ '''
+
+ if state == "present" and user_quota_id:
+ user_quota_details = self.get_filesystem_user_quota_display_attributes(user_quota_id)
+
+ result['get_user_quota_details'] = user_quota_details
+ if result['create_user_quota'] or result['modify_user_quota'] or result['delete_user_quota']:
+ result['changed'] = True
+
+ self.module.exit_json(**result)
+
+
+def get_user_quota_parameters():
+ """This method provide parameters required for the ansible filesystem
+ user quota module on Unity"""
+ return dict(
+ filesystem_id=dict(required=False, type='str'),
+ filesystem_name=dict(required=False, type='str'),
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ user_type=dict(required=False, type='str',
+ choices=['Windows', 'Unix']),
+ user_name=dict(required=False, type='str'),
+ uid=dict(required=False, type='str'),
+ win_domain=dict(required=False, type='str'),
+ hard_limit=dict(required=False, type='int'),
+ soft_limit=dict(required=False, type='int'),
+ cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
+ user_quota_id=dict(required=False, type='str'),
+ nas_server_name=dict(required=False, type='str'),
+ nas_server_id=dict(required=False, type='str'),
+ tree_quota_id=dict(required=False, type='str'),
+ path=dict(required=False, type='str', no_log=True)
+ )
+
+
+def main():
+ """ Create Unity user quota object and perform action on it
+ based on user input from playbook"""
+ obj = UserQuota()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/unity/plugins/modules/volume.py b/ansible_collections/dellemc/unity/plugins/modules/volume.py
new file mode 100644
index 000000000..82bcb0174
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/volume.py
@@ -0,0 +1,1277 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing volumes on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+
+module: volume
+version_added: '1.1.0'
+short_description: Manage volume on Unity storage system
+description:
+- Managing volume on Unity storage system includes
+  Create new volume,
+  Modify volume attributes,
+  Map volume to host,
+  Unmap volume from host,
+  Display volume details,
+  Delete volume.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Arindam Datta (@arindam-emc) <ansible.team@dell.com>
+- Pavan Mudunuri(@Pavan-Mudunuri) <ansible.team@dell.com>
+
+options:
+ vol_name:
+ description:
+ - The name of the volume. Mandatory only for create operation.
+ type: str
+ vol_id:
+ description:
+ - The id of the volume.
+ - It can be used only for get, modify, map/unmap host, or delete operation.
+ type: str
+ pool_name:
+ description:
+ - This is the name of the pool where the volume will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new volume.
+ type: str
+ pool_id:
+ description:
+ - This is the id of the pool where the volume will be created.
+ - Either the I(pool_name) or I(pool_id) must be provided to create a new volume.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ cap_unit:
+ description:
+ - The unit of the volume size. It defaults to C(GB), if not specified.
+ choices: ['GB' , 'TB']
+ type: str
+ description:
+ description:
+ - Description about the volume.
+ - Description can be removed by passing empty string ("").
+ type: str
+ snap_schedule:
+ description:
+ - Snapshot schedule assigned to the volume.
+ - Add/Remove/Modify the snapshot schedule for the volume.
+ type: str
+ compression:
+ description:
+    - Boolean variable, specifies whether or not to enable compression.
+ Compression is supported only for thin volumes.
+ type: bool
+ advanced_dedup:
+ description:
+    - Boolean variable, indicates whether or not to enable advanced deduplication.
+ - Compression should be enabled to enable advanced deduplication.
+ - It can only be enabled on the all flash high end platforms.
+ - Deduplicated data will remain as is even after advanced deduplication is disabled.
+ type: bool
+ is_thin:
+ description:
+    - Boolean variable, specifies whether or not it is a thin volume.
+ - The value is set as C(true) by default if not specified.
+ type: bool
+ sp:
+ description:
+ - Storage Processor for this volume.
+ choices: ['SPA' , 'SPB']
+ type: str
+ io_limit_policy:
+ description:
+ - IO limit policy associated with this volume.
+      Once it is set, it cannot be removed through the Ansible module, but it
+      can be changed.
+ type: str
+ host_name:
+ description:
+ - Name of the host to be mapped/unmapped with this volume.
+ - Either I(host_name) or I(host_id) can be specified in one task along with
+ I(mapping_state).
+ type: str
+ host_id:
+ description:
+ - ID of the host to be mapped/unmapped with this volume.
+ - Either I(host_name) or I(host_id) can be specified in one task along with
+ I(mapping_state).
+ type: str
+ hlu:
+ description:
+    - Host LUN Unit to be mapped/unmapped with this volume.
+    - It is an optional parameter; I(hlu) can be specified along
+      with I(host_name) or I(host_id) and I(mapping_state).
+    - If I(hlu) is not specified, Unity will choose it automatically.
+ The maximum value supported is C(255).
+ type: int
+ mapping_state:
+ description:
+ - State of host access for volume.
+ choices: ['mapped' , 'unmapped']
+ type: str
+ new_vol_name:
+ description:
+ - New name of the volume for rename operation.
+ type: str
+ tiering_policy:
+ description:
+ - Tiering policy choices for how the storage resource data will be
+ distributed among the tiers available in the pool.
+ choices: ['AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']
+ type: str
+ state:
+ description:
+ - State variable to determine whether volume will exist or not.
+ choices: ['absent', 'present']
+ required: true
+ type: str
+ hosts:
+ description:
+ - Name of hosts for mapping to a volume.
+ type: list
+ elements: dict
+ suboptions:
+ host_name:
+ description:
+ - Name of the host.
+ type: str
+ host_id:
+ description:
+ - ID of the host.
+ type: str
+ hlu:
+ description:
+        - Host LUN Unit to be mapped/unmapped with this volume.
+        - It is an optional parameter; I(hlu) can be specified along
+          with I(host_name) or I(host_id) and I(mapping_state).
+        - If I(hlu) is not specified, Unity will choose it automatically.
+ The maximum value supported is C(255).
+ type: str
+
+notes:
+ - The I(check_mode) is not supported.
+"""
+
+EXAMPLES = r"""
+- name: Create Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ description: "{{description}}"
+ pool_name: "{{pool}}"
+ size: 2
+ cap_unit: "{{cap_GB}}"
+ is_thin: True
+ compression: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+- name: Expand Volume by volume id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ size: 5
+ cap_unit: "{{cap_GB}}"
+ state: "{{state_present}}"
+
+- name: Modify Volume, map host by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ hlu: 5
+ mapping_state: "{{state_mapped}}"
+ state: "{{state_present}}"
+
+- name: Modify Volume, unmap host mapping by host_name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ host_name: "{{host_name}}"
+ mapping_state: "{{state_unmapped}}"
+ state: "{{state_present}}"
+
+- name: Map multiple hosts to a Volume
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ hosts:
+ - host_name: "10.226.198.248"
+ hlu: 1
+ - host_id: "Host_929"
+ hlu: 2
+ mapping_state: "mapped"
+ state: "present"
+
+- name: Modify Volume attributes
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ new_vol_name: "{{new_vol_name}}"
+ tiering_policy: "AUTOTIER"
+ compression: True
+ is_thin: True
+ advanced_dedup: True
+ state: "{{state_present}}"
+
+- name: Delete Volume by vol name
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_name: "{{vol_name}}"
+ state: "{{state_absent}}"
+
+- name: Delete Volume by vol id
+ dellemc.unity.volume:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ vol_id: "{{vol_id}}"
+ state: "{{state_absent}}"
+"""
+
+RETURN = r'''
+
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: True
+
+volume_details:
+ description: Details of the volume.
+ returned: When volume exists
+ type: dict
+ contains:
+ id:
+ description: The system generated ID given to the volume.
+ type: str
+ name:
+ description: Name of the volume.
+ type: str
+ description:
+ description: Description about the volume.
+ type: str
+ is_data_reduction_enabled:
+ description: Whether or not compression is enabled on this volume.
+ type: bool
+ size_total_with_unit:
+ description: Size of the volume with actual unit.
+ type: str
+ snap_schedule:
+ description: Snapshot schedule applied to this volume.
+ type: dict
+ tiering_policy:
+ description: Tiering policy applied to this volume.
+ type: str
+ current_sp:
+ description: Current storage processor for this volume.
+ type: str
+ pool:
+ description: The pool in which this volume is allocated.
+ type: dict
+ host_access:
+ description: Host mapped to this volume.
+ type: list
+ io_limit_policy:
+ description: IO limit policy associated with this volume.
+ type: dict
+ wwn:
+ description: The world wide name of this volume.
+ type: str
+ is_thin_enabled:
+ description: Indicates whether thin provisioning is enabled for this
+ volume.
+ type: bool
+ sample: {
+ "current_node": "NodeEnum.SPB",
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "default_node": "NodeEnum.SPB",
+ "description": null,
+ "effective_io_limit_max_iops": null,
+ "effective_io_limit_max_kbps": null,
+ "existed": true,
+ "family_base_lun": {
+ "UnityLun": {
+ "hash": 8774954523796,
+ "id": "sv_27"
+ }
+ },
+ "family_clone_count": 0,
+ "hash": 8774954522426,
+ "health": {
+ "UnityHealth": {
+ "hash": 8774954528278
+ }
+ },
+ "host_access": [
+ {
+ "accessMask": "PRODUCTION",
+ "hlu": 0,
+ "id": "Host_75",
+ "name": "10.226.198.250"
+ }
+ ],
+ "id": "sv_27",
+ "io_limit_policy": null,
+ "is_advanced_dedup_enabled": false,
+ "is_compression_enabled": null,
+ "is_data_reduction_enabled": false,
+ "is_replication_destination": false,
+ "is_snap_schedule_paused": false,
+ "is_thin_clone": false,
+ "is_thin_enabled": false,
+ "metadata_size": 4294967296,
+ "metadata_size_allocated": 4026531840,
+ "name": "VSI-UNITY-test-task",
+ "per_tier_size_used": [
+ 111400714240,
+ 0,
+ 0
+ ],
+ "pool": {
+ "id": "pool_3",
+ "name": "Extreme_Perf_tier"
+ },
+ "size_allocated": 107374182400,
+ "size_total": 107374182400,
+ "size_total_with_unit": "100.0 GB",
+ "size_used": null,
+ "snap_count": 0,
+ "snap_schedule": null,
+ "snap_wwn": "60:06:01:60:5C:F0:50:00:94:3E:91:4D:51:5A:4F:97",
+ "snaps_size": 0,
+ "snaps_size_allocated": 0,
+ "storage_resource": {
+ "UnityStorageResource": {
+ "hash": 8774954518887
+ }
+ },
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "LUNTypeEnum.VMWARE_ISCSI",
+ "wwn": "60:06:01:60:5C:F0:50:00:00:B5:95:61:2E:34:DB:B2"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+import logging
+
+LOG = utils.get_logger('volume')
+
+application_type = "Ansible/1.6.0"
+
+
+def is_none_or_empty_string(param):
+    """Validate the input value for None or empty string."""
+ return not param or len(str(param)) <= 0
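+# Note: `not param` is already True for None, '' and other falsy inputs
+# (including the integer 0), so the length check only covers unusual objects
+# whose str() representation is empty.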
+
+
+class Volume(object):
+
+ """Class with volume operations"""
+
+ param_host_id = None
+ param_io_limit_pol_id = None
+ param_snap_schedule_name = None
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_volume_parameters())
+
+ mutually_exclusive = [['vol_name', 'vol_id'],
+ ['pool_name', 'pool_id'],
+ ['host_name', 'host_id']]
+
+ required_one_of = [['vol_name', 'vol_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=False,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+
+ def get_volume(self, vol_name=None, vol_id=None):
+ """Get the details of a volume.
+ :param vol_name: The name of the volume
+ :param vol_id: The id of the volume
+        :return: instance of the respective volume if it exists.
+ """
+
+ id_or_name = vol_id if vol_id else vol_name
+ errormsg = "Failed to get the volume {0} with error {1}"
+
+ try:
+
+ obj_vol = self.unity_conn.get_lun(name=vol_name, _id=vol_id)
+
+ if vol_id and obj_vol.existed:
+ LOG.info("Successfully got the volume object %s ", obj_vol)
+ return obj_vol
+ elif vol_name:
+ LOG.info("Successfully got the volume object %s ", obj_vol)
+ return obj_vol
+ else:
+ LOG.info("Failed to get the volume %s", id_or_name)
+ return None
+
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ cred_err = "Incorrect username or password , {0}".format(
+ e.message)
+ msg = errormsg.format(id_or_name, cred_err)
+ self.module.fail_json(msg=msg)
+ else:
+ msg = errormsg.format(id_or_name, str(e))
+ self.module.fail_json(msg=msg)
+
+ except utils.UnityResourceNotFoundError as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ return None
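+            # A resource-not-found error deliberately maps to None instead of
+            # failing, so callers can fall through to the create path for a
+            # new volume.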
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_host(self, host_name=None, host_id=None):
+ """Get the instance of a host.
+ :param host_name: The name of the host
+        :param host_id: The id of the host
+        :return: instance of the respective host if it exists.
+ """
+
+ id_or_name = host_id if host_id else host_name
+ errormsg = "Failed to get the host {0} with error {1}"
+
+ try:
+
+ obj_host = self.unity_conn.get_host(name=host_name, _id=host_id)
+
+ if host_id and obj_host.existed:
+ LOG.info("Successfully got the host object %s ", obj_host)
+ return obj_host
+ elif host_name:
+ LOG.info("Successfully got the host object %s ", obj_host)
+ return obj_host
+ else:
+ msg = "Failed to get the host {0}".format(id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_snap_schedule(self, name):
+ """Get the instance of a snapshot schedule.
+ :param name: The name of the snapshot schedule
+        :return: instance of the respective snapshot schedule if it exists.
+ """
+
+ errormsg = "Failed to get the snapshot schedule {0} with error {1}"
+
+ try:
+ LOG.debug("Attempting to get Snapshot Schedule with name %s",
+ name)
+ obj_ss = utils.UnitySnapScheduleList.get(self.unity_conn._cli,
+ name=name)
+ if obj_ss and (len(obj_ss) > 0):
+ LOG.info("Successfully got Snapshot Schedule %s", obj_ss)
+ return obj_ss
+ else:
+ msg = "Failed to get snapshot schedule " \
+ "with name {0}".format(name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_io_limit_policy(self, name=None, id=None):
+ """Get the instance of a io limit policy.
+ :param name: The io limit policy name
+ :param id: The io limit policy id
+ :return: instance of the respective io_limit_policy if exist.
+ """
+
+ errormsg = "Failed to get the io limit policy {0} with error {1}"
+ id_or_name = name if name else id
+
+ try:
+ obj_iopol = self.unity_conn.get_io_limit_policy(_id=id, name=name)
+ if id and obj_iopol.existed:
+ LOG.info("Successfully got the IO limit policy object %s",
+ obj_iopol)
+ return obj_iopol
+ elif name:
+ LOG.info("Successfully got the IO limit policy object %s ",
+ obj_iopol)
+ return obj_iopol
+ else:
+ msg = "Failed to get the io limit policy with {0}".format(
+ id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+            msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_pool(self, pool_name=None, pool_id=None):
+ """Get the instance of a pool.
+ :param pool_name: The name of the pool
+ :param pool_id: The id of the pool
+        :return: instance of the respective pool if it exists
+ """
+
+ id_or_name = pool_id if pool_id else pool_name
+ errormsg = "Failed to get the pool {0} with error {1}"
+
+ try:
+ obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)
+
+ if pool_id and obj_pool.existed:
+ LOG.info("Successfully got the pool object %s",
+ obj_pool)
+ return obj_pool
+ if pool_name:
+ LOG.info("Successfully got pool %s", obj_pool)
+ return obj_pool
+ else:
+ msg = "Failed to get the pool with " \
+ "{0}".format(id_or_name)
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = errormsg.format(id_or_name, str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_node_enum(self, sp):
+ """Get the storage processor enum.
+ :param sp: The storage processor string
+ :return: storage processor enum
+ """
+
+ if sp in utils.NodeEnum.__members__:
+ return utils.NodeEnum[sp]
+ else:
+ errormsg = "Invalid choice {0} for storage processor".format(
+ sp)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_tiering_policy_enum(self, tiering_policy):
+ """Get the tiering_policy enum.
+ :param tiering_policy: The tiering_policy string
+ :return: tiering_policy enum
+ """
+
+ if tiering_policy in utils.TieringPolicyEnum.__members__:
+ return utils.TieringPolicyEnum[tiering_policy]
+ else:
+ errormsg = "Invalid choice {0} for tiering policy".format(
+ tiering_policy)
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_volume(self, obj_pool, size, host_access=None):
+ """Create a volume.
+ :param obj_pool: pool object instance
+ :param size: size of the volume in GB
+ :param host_access: host to be associated with this volume
+ :return: Volume object on successful creation
+ """
+
+ vol_name = self.module.params['vol_name']
+
+ try:
+
+ description = self.module.params['description']
+ compression = self.module.params['compression']
+ advanced_dedup = self.module.params['advanced_dedup']
+ is_thin = self.module.params['is_thin']
+ snap_schedule = None
+
+ sp = self.module.params['sp']
+ sp = self.get_node_enum(sp) if sp else None
+
+ io_limit_policy = self.get_io_limit_policy(
+ id=self.param_io_limit_pol_id) \
+ if self.module.params['io_limit_policy'] else None
+
+ if self.param_snap_schedule_name:
+ snap_schedule = {"name": self.param_snap_schedule_name}
+
+ tiering_policy = self.module.params['tiering_policy']
+ tiering_policy = self.get_tiering_policy_enum(tiering_policy) \
+ if tiering_policy else None
+
+ obj_vol = obj_pool.create_lun(lun_name=vol_name,
+ size_gb=size,
+ sp=sp,
+ host_access=host_access,
+ is_thin=is_thin,
+ description=description,
+ tiering_policy=tiering_policy,
+ snap_schedule=snap_schedule,
+ io_limit_policy=io_limit_policy,
+ is_compression=compression,
+ is_advanced_dedup_enabled=advanced_dedup)
+
+ LOG.info("Successfully created volume , %s", obj_vol)
+
+ return obj_vol
+
+ except Exception as e:
+ errormsg = "Create volume operation {0} failed" \
+ " with error {1}".format(vol_name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def host_access_modify_required(self, host_access_list):
+ """Check if host access modification is required
+ :param host_access_list: host access dict list
+        :return: Boolean indicating whether host access modification
+        is required.
+ """
+
+ try:
+ to_modify = False
+ mapping_state = self.module.params['mapping_state']
+
+ host_id_list = []
+ hlu_list = []
+ new_list = []
+ if not host_access_list and self.new_host_list and\
+ mapping_state == 'unmapped':
+ return to_modify
+
+ elif host_access_list:
+ for host_access in host_access_list.host:
+ host_id_list.append(host_access.id)
+ host = self.get_host(host_id=host_access.id).update()
+ host_dict = host.host_luns._get_properties()
+ LOG.debug("check if hlu present : %s", host_dict)
+
+ if "hlu" in host_dict.keys():
+ hlu_list.append(host_dict['hlu'])
+
+ if mapping_state == 'mapped':
+ if (self.param_host_id not in host_id_list):
+ for item in self.new_host_list:
+ new_list.append(item.get("host_id"))
+ if not list(set(new_list) - set(host_id_list)):
+ return False
+ to_modify = True
+
+ if mapping_state == 'unmapped':
+ if self.new_host_list:
+ for item in self.new_host_list:
+ new_list.append(item.get("host_id"))
+ if list(set(new_list) - set(host_id_list)):
+ return False
+ self.overlapping_list = list(set(host_id_list) - set(new_list))
+ to_modify = True
+ LOG.debug("host_access_modify_required : %s ", str(to_modify))
+ return to_modify
+
+ except Exception as e:
+ errormsg = "Failed to compare the host_access with error {0} " \
+ "{1}".format(host_access_list, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def volume_modify_required(self, obj_vol, cap_unit):
+ """Check if volume modification is required
+ :param obj_vol: volume instance
+ :param cap_unit: capacity unit
+        :return: Dict of attributes to modify, or None if no modification is required
+ """
+
+ try:
+ to_update = {}
+
+ new_vol_name = self.module.params['new_vol_name']
+ if new_vol_name and obj_vol.name != new_vol_name:
+ to_update.update({'name': new_vol_name})
+
+ description = self.module.params['description']
+ if description and obj_vol.description != description:
+ to_update.update({'description': description})
+
+ size = self.module.params['size']
+ if size and cap_unit:
+ size_byte = int(utils.get_size_bytes(size, cap_unit))
+ if size_byte < obj_vol.size_total:
+ self.module.fail_json(msg="Volume size can be "
+ "expanded only")
+ elif size_byte > obj_vol.size_total:
+ to_update.update({'size': size_byte})
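+                # Worked example, assuming utils.get_size_bytes uses
+                # 1024-based units: size=5 with cap_unit='GB' is
+                # 5 * 1024**3 = 5368709120 bytes; anything below the current
+                # size_total is rejected because a LUN can only grow.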
+
+ compression = self.module.params['compression']
+ if compression is not None and \
+ compression != obj_vol.is_data_reduction_enabled:
+ to_update.update({'is_compression': compression})
+
+ advanced_dedup = self.module.params['advanced_dedup']
+ if advanced_dedup is not None and \
+ advanced_dedup != obj_vol.is_advanced_dedup_enabled:
+ to_update.update({'is_advanced_dedup_enabled': advanced_dedup})
+
+ is_thin = self.module.params['is_thin']
+ if is_thin is not None and is_thin != obj_vol.is_thin_enabled:
+ self.module.fail_json(msg="Modifying is_thin is not allowed")
+
+ sp = self.module.params['sp']
+ if sp and self.get_node_enum(sp) != obj_vol.current_node:
+ to_update.update({'sp': self.get_node_enum(sp)})
+
+ tiering_policy = self.module.params['tiering_policy']
+ if tiering_policy and self.get_tiering_policy_enum(
+ tiering_policy) != obj_vol.tiering_policy:
+ to_update.update({'tiering_policy':
+ self.get_tiering_policy_enum(
+ tiering_policy)})
+
+ # prepare io_limit_policy object
+ if self.param_io_limit_pol_id:
+ if (not obj_vol.io_limit_policy) \
+ or (self.param_io_limit_pol_id
+ != obj_vol.io_limit_policy.id):
+ to_update.update(
+ {'io_limit_policy': self.param_io_limit_pol_id})
+
+ # prepare snap_schedule object
+ if self.param_snap_schedule_name:
+ if (not obj_vol.snap_schedule) \
+ or (self.param_snap_schedule_name
+ != obj_vol.snap_schedule.name):
+ to_update.update({'snap_schedule':
+ self.param_snap_schedule_name})
+
+ # for removing existing snap_schedule
+ if self.param_snap_schedule_name == "":
+ if obj_vol.snap_schedule:
+ to_update.update({'is_snap_schedule_paused': False})
+ else:
+ LOG.warn("No snapshot schedule is associated")
+
+ LOG.debug("Volume to modify Dict : %s", to_update)
+ if len(to_update) > 0:
+ return to_update
+ else:
+ return None
+
+ except Exception as e:
+ errormsg = "Failed to determine if volume {0},requires " \
+ "modification, with error {1}".format(obj_vol.name,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def multiple_host_map(self, host_dic_list, obj_vol):
+ """Attach multiple hosts to a volume
+ :param host_dic_list: hosts to map the volume
+ :param obj_vol: volume instance
+ :return: response from API call
+ """
+
+ try:
+ host_access = []
+ current_hosts = self.get_volume_host_access_list(obj_vol)
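+            # modify(host_access=...) replaces the LUN's entire access list,
+            # so the current mappings are re-sent alongside the new ones;
+            # omitting them would effectively unmap those hosts.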
+ for existing_host in current_hosts:
+ host_access.append(
+ {'accessMask': eval('utils.HostLUNAccessEnum.' + existing_host['accessMask']),
+ 'host':
+ {'id': existing_host['id']}, 'hlu': existing_host['hlu']})
+ for item in host_dic_list:
+ host_access.append(
+ {'accessMask': utils.HostLUNAccessEnum.PRODUCTION,
+ 'host':
+ {'id': item['host_id']}, 'hlu': item['hlu']})
+ resp = obj_vol.modify(host_access=host_access)
+ return resp
+ except Exception as e:
+ errormsg = "Failed to attach hosts {0} with volume {1} with error {2} ".format(host_dic_list, obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def multiple_detach(self, host_list_detach, obj_vol):
+ """Detach multiple hosts from a volume
+ :param host_list_detach: hosts to unmap the volume
+ :param obj_vol: volume instance
+ :return: response from API call
+ """
+
+ try:
+ host_access = []
+ for item in host_list_detach:
+ host_access.append({'accessMask': utils.HostLUNAccessEnum.PRODUCTION,
+ 'host': {'id': item}})
+ resp = obj_vol.modify(host_access=host_access)
+ return resp
+ except Exception as e:
+ errormsg = "Failed to detach hosts {0} from volume {1} with error {2} ".format(host_list_detach, obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def modify_volume(self, obj_vol, to_modify_dict):
+ """modify volume attributes
+ :param obj_vol: volume instance
+ :param to_modify_dict: dict containing attributes to be modified.
+ :return: None
+ """
+
+ try:
+
+ if 'io_limit_policy' in to_modify_dict.keys():
+ to_modify_dict['io_limit_policy'] = self.get_io_limit_policy(
+ id=to_modify_dict['io_limit_policy'])
+
+ if 'snap_schedule' in to_modify_dict.keys() and \
+ to_modify_dict['snap_schedule'] != "":
+ to_modify_dict['snap_schedule'] = \
+ {"name": to_modify_dict['snap_schedule']}
+
+ param_list = ['name', 'size', 'host_access', 'description', 'sp',
+ 'io_limit_policy', 'tiering_policy',
+ 'snap_schedule', 'is_snap_schedule_paused',
+ 'is_compression', 'is_advanced_dedup_enabled']
+
+ for item in param_list:
+ if item not in to_modify_dict.keys():
+ to_modify_dict.update({item: None})
+
+ LOG.debug("Final update dict before modify "
+ "api call: %s", to_modify_dict)
+
+ obj_vol.modify(name=to_modify_dict['name'],
+ size=to_modify_dict['size'],
+ host_access=to_modify_dict['host_access'],
+ description=to_modify_dict['description'],
+ sp=to_modify_dict['sp'],
+ io_limit_policy=to_modify_dict['io_limit_policy'],
+ tiering_policy=to_modify_dict['tiering_policy'],
+ snap_schedule=to_modify_dict['snap_schedule'],
+ is_snap_schedule_paused=to_modify_dict['is_snap_schedule_paused'],
+ is_compression=to_modify_dict['is_compression'],
+ is_advanced_dedup_enabled=to_modify_dict['is_advanced_dedup_enabled'])
+
+ except Exception as e:
+ errormsg = "Failed to modify the volume {0} " \
+ "with error {1}".format(obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_volume(self, vol_id):
+ """Delete volume.
+        :param vol_id: The id of the volume to be deleted
+        :return: True on successful deletion
+        """
+
+ try:
+ obj_vol = self.get_volume(vol_id=vol_id)
+ obj_vol.delete(force_snap_delete=False)
+ return True
+
+ except Exception as e:
+ errormsg = "Delete operation of volume id:{0} " \
+ "failed with error {1}".format(id,
+ str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_volume_host_access_list(self, obj_vol):
+ """
+ Get volume host access list
+ :param obj_vol: volume instance
+ :return: host list
+ """
+ host_list = []
+ if obj_vol.host_access:
+ for host_access in obj_vol.host_access:
+ host = self.get_host(host_id=host_access.host.id).update()
+ hlu = None
+ for host_lun in host.host_luns:
+ if host_lun.lun.name == obj_vol.name:
+ hlu = host_lun.hlu
+ host_list.append({'name': host_access.host.name,
+ 'id': host_access.host.id,
+ 'accessMask': host_access.access_mask.name,
+ 'hlu': hlu})
+ return host_list
+
+ def get_volume_display_attributes(self, obj_vol):
+ """get display volume attributes
+ :param obj_vol: volume instance
+ :return: volume dict to display
+ """
+ try:
+ obj_vol = obj_vol.update()
+ volume_details = obj_vol._get_properties()
+ volume_details['size_total_with_unit'] = utils. \
+ convert_size_with_unit(int(volume_details['size_total']))
+ volume_details.update({'host_access': self.get_volume_host_access_list(obj_vol)})
+ if obj_vol.snap_schedule:
+ volume_details.update(
+ {'snap_schedule': {'name': obj_vol.snap_schedule.name,
+ 'id': obj_vol.snap_schedule.id}})
+ if obj_vol.io_limit_policy:
+ volume_details.update(
+                    {'io_limit_policy': {'name': obj_vol.io_limit_policy.name,
+ 'id': obj_vol.io_limit_policy.id}})
+ if obj_vol.pool:
+ volume_details.update({'pool': {'name': obj_vol.pool.name,
+ 'id': obj_vol.pool.id}})
+
+ return volume_details
+
+ except Exception as e:
+ errormsg = "Failed to display the volume {0} with " \
+ "error {1}".format(obj_vol.name, str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_input_string(self):
+ """ validates the input string checks if it is empty string
+
+ """
+ invalid_string = ""
+ try:
+ no_chk_list = ['snap_schedule', 'description']
+ for key in self.module.params:
+ val = self.module.params[key]
+ if key not in no_chk_list and isinstance(val, str) \
+ and val == invalid_string:
+ errmsg = 'Invalid input parameter "" for {0}'.format(
+ key)
+ self.module.fail_json(msg=errmsg)
+
+ except Exception as e:
+ errormsg = "Failed to validate the module param with " \
+ "error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_host_list(self, host_list_input):
+ """ validates the host_list_input value for None and empty
+
+ """
+ try:
+ for host_list in host_list_input:
+ if ("host_name" in host_list.keys() and "host_id" in host_list.keys()):
+ if host_list["host_name"] and host_list["host_id"]:
+ errmsg = 'parameters are mutually exclusive: host_name|host_id'
+ self.module.fail_json(msg=errmsg)
+ is_host_details_missing = True
+ for key, value in host_list.items():
+ if key == "host_name" and not is_none_or_empty_string(value):
+ is_host_details_missing = False
+ elif key == "host_id" and not is_none_or_empty_string(value):
+ is_host_details_missing = False
+
+ if is_host_details_missing:
+ errmsg = 'Invalid input parameter for {0}'.format(key)
+ self.module.fail_json(msg=errmsg)
+
+ except Exception as e:
+ errormsg = "Failed to validate the module param with " \
+ "error {0}".format(str(e))
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def resolve_host_mappings(self, hosts):
+ """ This method creates a dictionary of hosts and hlu parameter values
+ :param hosts: host and hlu value passed from input file
+ :return: list of host and hlu dictionary
+ """
+ host_list_new = []
+
+ if hosts:
+ for item in hosts:
+ host_dict = dict()
+ host_id = None
+ hlu = None
+ if item['host_name']:
+ host = self.get_host(host_name=item['host_name'])
+ if host:
+ host_id = host.id
+ if item['host_id']:
+ host_id = item['host_id']
+ if item['hlu']:
+ hlu = item['hlu']
+ host_dict['host_id'] = host_id
+ host_dict['hlu'] = hlu
+ host_list_new.append(host_dict)
+ return host_list_new
+
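+    # Illustrative sketch (not part of the module, values hypothetical): given
+    # playbook input such as
+    #     hosts: [{'host_name': 'host1', 'host_id': None, 'hlu': 2}]
+    # resolve_host_mappings() looks the host up by name and returns
+    #     [{'host_id': 'Host_123', 'hlu': 2}]
+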
+ def perform_module_operation(self):
+ """
+ Perform different actions on volume module based on parameters
+ passed in the playbook
+ """
+ self.new_host_list = []
+ self.overlapping_list = []
+ vol_name = self.module.params['vol_name']
+ vol_id = self.module.params['vol_id']
+ pool_name = self.module.params['pool_name']
+ pool_id = self.module.params['pool_id']
+ size = self.module.params['size']
+ cap_unit = self.module.params['cap_unit']
+ snap_schedule = self.module.params['snap_schedule']
+ io_limit_policy = self.module.params['io_limit_policy']
+ host_name = self.module.params['host_name']
+ host_id = self.module.params['host_id']
+ hlu = self.module.params['hlu']
+ mapping_state = self.module.params['mapping_state']
+ new_vol_name = self.module.params['new_vol_name']
+ state = self.module.params['state']
+ hosts = self.module.params['hosts']
+
+ # result is a dictionary to contain end state and volume details
+ changed = False
+ result = dict(
+ changed=False,
+ volume_details={}
+ )
+
+ to_modify_dict = None
+ volume_details = None
+ to_modify_host = False
+
+ self.validate_input_string()
+
+ if hosts:
+ self.validate_host_list(hosts)
+
+ if size is not None and size == 0:
+ self.module.fail_json(msg="Size can not be 0 (Zero)")
+
+ if size and not cap_unit:
+ cap_unit = 'GB'
+
+        if (cap_unit is not None) and not size:
+            self.module.fail_json(msg="cap_unit can only be specified "
+                                      "along with size")
+
+        if hlu and (not host_name and not host_id and not hosts):
+            self.module.fail_json(msg="hlu can only be specified along with "
+                                      "host_id, host_name or hosts")
+        if mapping_state and (not host_name and not host_id and not hosts):
+            self.module.fail_json(msg="mapping_state can only be specified "
+                                      "along with host_id, host_name or hosts")
+
+ obj_vol = self.get_volume(vol_id=vol_id, vol_name=vol_name)
+
+ if host_name or host_id:
+ if not mapping_state:
+ errmsg = "'mapping_state' is required along with " \
+ "'host_name' or 'host_id' or 'hosts'"
+ self.module.fail_json(msg=errmsg)
+ host = [{'host_name': host_name, 'host_id': host_id, 'hlu': hlu}]
+ self.new_host_list = self.resolve_host_mappings(host)
+
+ if hosts:
+ if not mapping_state:
+ errmsg = "'mapping_state' is required along with " \
+ "'host_name' or 'host_id' or 'hosts'"
+ self.module.fail_json(msg=errmsg)
+ self.new_host_list += self.resolve_host_mappings(hosts)
+
+ if io_limit_policy:
+ io_limit_policy = self.get_io_limit_policy(name=io_limit_policy)
+ self.param_io_limit_pol_id = io_limit_policy.id
+
+ if snap_schedule:
+ snap_schedule = self.get_snap_schedule(name=snap_schedule)
+ self.param_snap_schedule_name = snap_schedule.name[0]
+
+ # this is for removing existing snap_schedule
+ if snap_schedule == "":
+ self.param_snap_schedule_name = snap_schedule
+
+ if obj_vol:
+ volume_details = obj_vol._get_properties()
+ vol_id = obj_vol.get_id()
+ to_modify_dict = self.volume_modify_required(obj_vol, cap_unit)
+ LOG.debug("Volume Modify Required: %s", to_modify_dict)
+ if obj_vol.host_access:
+ to_modify_host = self.host_access_modify_required(
+ host_access_list=obj_vol.host_access)
+ LOG.debug("Host Modify Required in access: %s", to_modify_host)
+ elif self.new_host_list:
+ to_modify_host = self.host_access_modify_required(
+ host_access_list=obj_vol.host_access)
+ LOG.debug("Host Modify Required: %s", to_modify_host)
+
+ if state == 'present' and not volume_details:
+ if not vol_name:
+ msg_noname = "volume with id {0} is not found, unable to " \
+ "create a volume without a valid " \
+ "vol_name".format(vol_id)
+ self.module.fail_json(msg=msg_noname)
+
+ if snap_schedule == "":
+ self.module.fail_json(msg="Invalid snap_schedule")
+
+            if new_vol_name:
+                self.module.fail_json(msg="new_vol_name is not allowed when "
+                                          "creating a new volume")
+            if not pool_name and not pool_id:
+                self.module.fail_json(msg="pool_id or pool_name is required "
+                                          "to create a new volume")
+ if not size:
+ self.module.fail_json(msg="Size is required to create"
+ " a volume")
+ host_access = None
+ if self.new_host_list:
+ host_access = []
+ for item in self.new_host_list:
+ if item['hlu']:
+ host_access.append(
+ {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']},
+ 'hlu': item['hlu']})
+ else:
+ host_access.append(
+ {'accessMask': utils.HostLUNAccessEnum.PRODUCTION, 'host': {'id': item['host_id']}})
+
+ size = utils.get_size_in_gb(size, cap_unit)
+
+ obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)
+
+ obj_vol = self.create_volume(obj_pool=obj_pool, size=size,
+ host_access=host_access)
+ if obj_vol:
+ LOG.debug("Successfully created volume , %s", obj_vol)
+ vol_id = obj_vol.id
+ volume_details = obj_vol._get_properties()
+ LOG.debug("Got volume id , %s", vol_id)
+ changed = True
+
+ if state == 'present' and volume_details and to_modify_dict:
+ self.modify_volume(obj_vol=obj_vol, to_modify_dict=to_modify_dict)
+ changed = True
+
+ if (state == 'present' and volume_details
+ and mapping_state == 'mapped' and to_modify_host):
+ if self.new_host_list:
+ resp = self.multiple_host_map(host_dic_list=self.new_host_list, obj_vol=obj_vol)
+                changed = bool(resp)
+
+ if (state == 'present' and volume_details
+ and mapping_state == 'unmapped' and to_modify_host):
+ if self.new_host_list:
+ resp = self.multiple_detach(host_list_detach=self.overlapping_list, obj_vol=obj_vol)
+ LOG.info(resp)
+                changed = bool(resp)
+
+ if state == 'absent' and volume_details:
+ changed = self.delete_volume(vol_id)
+ volume_details = None
+
+ if state == 'present' and volume_details:
+ volume_details = self.get_volume_display_attributes(
+ obj_vol=obj_vol)
+
+ result['changed'] = changed
+ result['volume_details'] = volume_details
+ self.module.exit_json(**result)
+
+
+def get_volume_parameters():
+ """This method provide parameters required for the ansible volume
+ module on Unity"""
+ return dict(
+ vol_name=dict(required=False, type='str'),
+ vol_id=dict(required=False, type='str'),
+ description=dict(required=False, type='str'),
+ pool_name=dict(required=False, type='str'),
+ pool_id=dict(required=False, type='str'),
+ size=dict(required=False, type='int'),
+ cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
+ is_thin=dict(required=False, type='bool'),
+ compression=dict(required=False, type='bool'),
+ advanced_dedup=dict(required=False, type='bool'),
+ sp=dict(required=False, type='str', choices=['SPA', 'SPB']),
+ io_limit_policy=dict(required=False, type='str'),
+ snap_schedule=dict(required=False, type='str'),
+ host_name=dict(required=False, type='str'),
+ host_id=dict(required=False, type='str'),
+ hosts=dict(required=False, type='list', elements='dict',
+ options=dict(
+ host_id=dict(required=False, type='str'),
+ host_name=dict(required=False, type='str'),
+ hlu=dict(required=False, type='str')
+ )),
+ hlu=dict(required=False, type='int'),
+ mapping_state=dict(required=False, type='str',
+ choices=['mapped', 'unmapped']),
+ new_vol_name=dict(required=False, type='str'),
+ tiering_policy=dict(required=False, type='str', choices=[
+ 'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
+ state=dict(required=True, type='str', choices=['present', 'absent'])
+ )
+
+
+def main():
+ """ Create Unity volume object and perform action on it
+ based on user input from playbook"""
+ obj = Volume()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
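+
+# Illustrative sketch (not part of the module, all values hypothetical): the
+# parameter set a typical create task would pass down to this module:
+#     example_params = {
+#         'vol_name': 'ansible_vol_1',   # name of the new volume
+#         'pool_name': 'pool_1',         # pool to provision from
+#         'size': 10,                    # cap_unit defaults to 'GB'
+#         'state': 'present'             # create if absent, else reconcile
+#     }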
diff --git a/ansible_collections/dellemc/unity/requirements.txt b/ansible_collections/dellemc/unity/requirements.txt
new file mode 100644
index 000000000..2325e97fb
--- /dev/null
+++ b/ansible_collections/dellemc/unity/requirements.txt
@@ -0,0 +1,3 @@
+urllib3
+storops>=1.2.11
+setuptools
diff --git a/ansible_collections/dellemc/unity/requirements.yml b/ansible_collections/dellemc/unity/requirements.yml
new file mode 100644
index 000000000..548a31076
--- /dev/null
+++ b/ansible_collections/dellemc/unity/requirements.yml
@@ -0,0 +1,3 @@
+---
+collections:
+ - name: dellemc.unity
diff --git a/ansible_collections/dellemc/unity/tests/requirements.txt b/ansible_collections/dellemc/unity/tests/requirements.txt
new file mode 100644
index 000000000..3541acd15
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/requirements.txt
@@ -0,0 +1,7 @@
+pytest
+pytest-xdist
+pytest-mock
+pytest-cov
+pytest-forked
+coverage==4.5.4
+mock
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..f78c82922
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,33 @@
+plugins/modules/nfs.py compile-2.6
+plugins/modules/nfs.py import-2.6
+plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/host.py validate-modules:missing-gplv3-license
+plugins/modules/nasserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfs.py validate-modules:missing-gplv3-license
+plugins/modules/smbshare.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/tree_quota.py validate-modules:missing-gplv3-license
+plugins/modules/user_quota.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/cifsserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfsserver.py validate-modules:missing-gplv3-license
+plugins/modules/host.py import-2.6
+plugins/modules/host.py import-2.7
+plugins/modules/interface.py import-2.6
+plugins/modules/interface.py import-2.7
+plugins/modules/nfs.py import-2.7
+plugins/modules/nfs.py import-3.5
+plugins/modules/nfs.py compile-2.7
+plugins/modules/nfs.py compile-3.5
+plugins/modules/filesystem.py import-2.6
+plugins/modules/filesystem.py compile-2.6
+plugins/modules/filesystem.py compile-2.7
+plugins/modules/filesystem.py compile-3.5
+plugins/modules/filesystem.py import-2.7
+plugins/modules/filesystem.py import-3.5
+plugins/modules/interface.py validate-modules:missing-gplv3-license \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..a175e9976
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,27 @@
+plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/host.py validate-modules:missing-gplv3-license
+plugins/modules/nasserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfs.py validate-modules:missing-gplv3-license
+plugins/modules/smbshare.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/tree_quota.py validate-modules:missing-gplv3-license
+plugins/modules/user_quota.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/cifsserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfsserver.py validate-modules:missing-gplv3-license
+plugins/modules/host.py import-2.7
+plugins/modules/interface.py import-2.7
+plugins/modules/nfs.py import-2.7
+plugins/modules/nfs.py import-3.5
+plugins/modules/nfs.py compile-2.7
+plugins/modules/nfs.py compile-3.5
+plugins/modules/filesystem.py compile-2.7
+plugins/modules/filesystem.py compile-3.5
+plugins/modules/filesystem.py import-2.7
+plugins/modules/filesystem.py import-3.5
+plugins/modules/interface.py validate-modules:missing-gplv3-license
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt
new file mode 100644
index 000000000..a175e9976
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,27 @@
+plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/host.py validate-modules:missing-gplv3-license
+plugins/modules/nasserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfs.py validate-modules:missing-gplv3-license
+plugins/modules/smbshare.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/tree_quota.py validate-modules:missing-gplv3-license
+plugins/modules/user_quota.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/cifsserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfsserver.py validate-modules:missing-gplv3-license
+plugins/modules/host.py import-2.7
+plugins/modules/interface.py import-2.7
+plugins/modules/nfs.py import-2.7
+plugins/modules/nfs.py import-3.5
+plugins/modules/nfs.py compile-2.7
+plugins/modules/nfs.py compile-3.5
+plugins/modules/filesystem.py compile-2.7
+plugins/modules/filesystem.py compile-3.5
+plugins/modules/filesystem.py import-2.7
+plugins/modules/filesystem.py import-3.5
+plugins/modules/interface.py validate-modules:missing-gplv3-license
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py
new file mode 100644
index 000000000..4ddee9661
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_api_exception.py
@@ -0,0 +1,19 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock ApiException for Unity Test modules"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockApiException(Exception):
+ body = "SDK Error message"
+ status = "500"
+
+
+class HttpError(Exception):
+ body = "Http Error message"
+ http_status = 401
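+
+
+# Illustrative sketch (not part of this file): unit tests typically raise
+# these classes in place of the storops exceptions, for example
+#     some_mocked_obj.modify = MagicMock(side_effect=MockApiException)
+# so module error paths run without a live array; the patched object and
+# method here are hypothetical.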
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py
new file mode 100644
index 000000000..427d530fa
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py
@@ -0,0 +1,200 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the CIFS server module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from unittest.mock import MagicMock
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockCIFSServerApi:
+ CIFS_SERVER_MODULE_ARGS = {
+ 'nas_server_id': None,
+ 'nas_server_name': None,
+ 'netbios_name': None,
+ 'workgroup': None,
+ 'local_password': None,
+ 'domain': None,
+ 'domain_username': None,
+ 'domain_password': None,
+ 'cifs_server_id': None,
+ 'cifs_server_name': None,
+ 'interfaces': None,
+ 'unjoin_cifs_server_account': None,
+ 'state': None
+ }
+
+ @staticmethod
+ def get_cifs_server_details_method_response():
+ return {
+ "description": None,
+ "domain": "xxx.xxx.xxx.xxx",
+ "existed": True,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [
+ {
+ "UnityFileInterface": {
+ "hash": 8791477905949,
+ "id": "if_43"
+ }
+ }
+ ]
+ },
+ "hash": 8791478461637,
+ "health": {
+ "UnityHealth": {
+ "hash": 8791478461623
+ }
+ },
+ "id": "cifs_59",
+ "is_standalone": False,
+ "last_used_organizational_unit": "ou=Computers,ou=EMC NAS servers",
+ "name": "test_cifs_server",
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": 8791478461595,
+ "id": "nas_18"
+ }
+ },
+ "netbios_name": "TEST_CIFS_SERVER",
+ "smb_multi_channel_supported": True,
+ "smb_protocol_versions": [
+ "1.0",
+ "2.0",
+ "2.1",
+ "3.0"
+ ],
+ "smbca_supported": True,
+ "workgroup": None
+ }
+
+ @staticmethod
+ def get_cifs_server_details_method_netbios_response():
+ return {
+ "UnityCifsServerList": [{
+ "UnityCifsServer": {
+ "existed": True,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [{
+ "UnityFileInterface": {
+ "hash": -9223363293222387298,
+ "id": "if_43"
+ }
+ }]
+ },
+ "hash": 8743632213638,
+ "health": {
+ "UnityHealth": {
+ "hash": -9223363293222562209
+ }
+ },
+ "id": "cifs_60",
+ "is_standalone": True,
+ "nas_server": {
+ "UnityNasServer": {
+ "hash": -9223363293221242245,
+ "id": "nas_18"
+ }
+ },
+ "netbios_name": "ANSIBLE_CIFS",
+ "smb_multi_channel_supported": True,
+ "smb_protocol_versions": ["1.0", "2.0", "2.1", "3.0"],
+ "smbca_supported": True,
+ "workgroup": "ANSIBLE"
+ }
+ }]
+ }
+
+ @staticmethod
+ def create_cifs_server_without_nas():
+ return "Please provide nas server id/name to create CIFS server."
+
+ @staticmethod
+ def invalid_credentials():
+ return "Incorrect username or password provided."
+
+ @staticmethod
+ def modify_error_msg():
+ return "Modification is not supported through Ansible module"
+
+ @staticmethod
+ def get_nas_server_details():
+ return {
+ "UnityNasServer": {
+ "cifs_server": {
+ "UnityCifsServerList": [{
+ "UnityCifsServer": {
+ "hash": 8734183189936,
+ "id": "cifs_60"
+ }
+ }]
+ },
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 8734188780762,
+ "id": "spa"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE",
+ "existed": True,
+ "file_dns_server": {
+ "UnityFileDnsServer": {
+ "hash": 8734183189782,
+ "id": "dns_11"
+ }
+ },
+ "file_interface": {
+ "UnityFileInterfaceList": [{
+ "UnityFileInterface": {
+ "hash": -9223363302671584431,
+ "id": "if_43"
+ }
+ }]
+ },
+ "hash": -9223363302671053452,
+ "health": {
+ "UnityHealth": {
+ "hash": 8734182402245
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": -9223363302671594510,
+ "id": "spa"
+ }
+ },
+ "id": "nas_18",
+ "is_backup_only": False,
+ "is_multi_protocol_enabled": False,
+ "is_packet_reflect_enabled": False,
+ "is_replication_destination": False,
+ "is_replication_enabled": False,
+ "name": "test_nas1",
+ "pool": {
+ "UnityPool": {
+ "hash": -9223363302672128291,
+ "id": "pool_7"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": -9223363302671585904,
+ "id": "preferred_if_16"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 2952790016,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 8734183191465,
+ "id": "cava_18"
+ }
+ }
+ }
+ }
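+
+# Illustrative sketch (not part of this file, patched target hypothetical):
+# a unit test would hand these canned payloads to the module under test, e.g.
+#     module_object.get_cifs_server_details = MagicMock(
+#         return_value=MockCIFSServerApi.get_cifs_server_details_method_response())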
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py
new file mode 100644
index 000000000..07fe6b5d7
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_consistencygroup_api.py
@@ -0,0 +1,122 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the consistency group module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from unittest.mock import MagicMock
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockConsistenyGroupApi:
+ CONSISTENCY_GROUP_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'cg_id': None,
+ 'cg_name': None,
+ 'new_cg_name': None,
+ 'pool_id': None,
+ 'description': None,
+ 'snap_schedule': None,
+ 'tiering_policy': None,
+ 'volumes': [],
+ 'vol_state': None,
+ 'hosts': [],
+ 'mapping_state': None,
+ 'replication_params': {},
+ 'replication_state': None,
+ 'state': None
+ }
+ IP_ADDRESS_MOCK_VALUE = '***.***.***.**'
+
+ @staticmethod
+ def cg_get_details_method_response():
+ return {'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': None, 'data_reduction_percent': 0,
+ 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0, 'data_reduction_status': 'DataReductionStatusEnum.DISABLED',
+ 'datastores': None, 'dedup_status': None, 'description': '', 'esx_filesystem_block_size': None,
+ 'esx_filesystem_major_version': None, 'filesystem': None, 'health': {}, 'host_v_vol_datastore': None,
+ 'id': 'cg_id_1', 'is_replication_destination': False, 'is_snap_schedule_paused': None,
+ 'luns': [{'id': 'lun_id_1', 'name': 'test_lun_cg_issue', 'is_thin_enabled': False,
+ 'size_total': 1, 'is_data_reduction_enabled': False}],
+ 'name': 'lun_test_cg_source_12', 'per_tier_size_used': [1, 0, 0],
+ 'pools': [{'id': 'pool_id_1'}],
+ 'relocation_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'replication_type': 'ReplicationTypeEnum.NONE',
+ 'size_allocated': 0, 'size_total': 1, 'size_used': None, 'snap_count': 0, 'snap_schedule': None,
+ 'snaps_size_allocated': 0, 'snaps_size_total': 0, 'thin_status': 'ThinStatusEnum.TRUE',
+ 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None,
+ 'existed': True, 'snapshots': [], 'cg_replication_enabled': False}
+
+ @staticmethod
+ def get_cg_object():
+ return MockSDKObject({'advanced_dedup_status': 'DedupStatusEnum.DISABLED', 'block_host_access': None,
+ 'data_reduction_percent': 0, 'data_reduction_ratio': 1.0, 'data_reduction_size_saved': 0,
+ 'data_reduction_status': 'DataReductionStatusEnum.DISABLED',
+ 'datastores': None, 'dedup_status': None, 'description': '', 'esx_filesystem_block_size': None,
+ 'esx_filesystem_major_version': None, 'filesystem': None, 'health': {}, 'host_v_vol_datastore': None,
+ 'id': 'cg_id_1', 'is_replication_destination': False, 'is_snap_schedule_paused': None,
+ 'luns': [MockSDKObject({'id': 'lun_id_1', 'name': 'test_lun_cg_issue',
+ 'is_thin_enabled': False, 'size_total': 1, 'is_data_reduction_enabled': False})],
+ 'name': 'lun_test_cg_source_12', 'per_tier_size_used': [1, 0, 0],
+ 'pools': [MockSDKObject({'id': 'pool_id_1'})],
+ 'relocation_policy': 'TieringPolicyEnum.AUTOTIER_HIGH', 'replication_type': 'ReplicationTypeEnum.NONE',
+ 'size_allocated': 0, 'size_total': 1, 'size_used': None, 'snap_count': 0, 'snap_schedule': None,
+ 'snaps_size_allocated': 0, 'snaps_size_total': 0, 'thin_status': 'ThinStatusEnum.TRUE',
+ 'type': 'StorageResourceTypeEnum.CONSISTENCY_GROUP', 'virtual_volumes': None, 'vmware_uuid': None,
+ 'existed': True, 'snapshots': [], 'cg_replication_enabled': False})
+
+ @staticmethod
+ def get_cg_replication_dependent_response(response_type):
+ if response_type == 'cg_replication_enabled_details':
+ cg_replication_enabled_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_replication_enabled_details['cg_replication_enabled'] = True
+ return cg_replication_enabled_details
+ elif response_type == 'remote_system':
+ return [MockSDKObject({"connection_type": "ReplicationCapabilityEnum.ASYNC", "existed": True,
+ "health": {"UnityHealth": {}}, "id": "system_id_1", "local_spa_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE],
+ "local_spb_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE],
+ "management_address": "**.***.**.**", "model": "U XXX",
+ "name": "ABXXXXXX", "remote_spa_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE],
+ "remote_spb_interfaces": [MockConsistenyGroupApi.IP_ADDRESS_MOCK_VALUE],
+ "serial_number": "ABXXXXXX", "sync_fc_ports": ["abc_def", "ghi_jkl"], "username": "username"})]
+ elif response_type == 'remote_system_pool_object':
+ return MockSDKObject({"alert_threshold": 60, "creation_time": "2021-10-18 12:51:27+00:00", "description": "",
+ "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE", "health": {"UnityHealth": {}},
+ "id": "pool_3", "is_all_flash": True, "is_empty": False, "is_fast_cache_enabled": False,
+ "is_harvest_enabled": True, "is_snap_harvest_enabled": True, "name": "Extreme_Perf_tier",
+ "object_id": 1, "pool_fast_vp": {"UnityPoolFastVp": {}}, "pool_space_harvest_high_threshold": 95.0,
+ "pool_space_harvest_low_threshold": 70.5, "pool_type": "StoragePoolTypeEnum.DYNAMIC",
+ "raid_type": "RaidTypeEnum.RAID5", "size_free": 1, "size_subscribed": 1, "size_total": 1,
+ "size_used": 1, "snap_size_subscribed": 1, "snap_size_used": 1, "snap_space_harvest_high_threshold": 20.5,
+ "snap_space_harvest_low_threshold": 1.0,
+ "tiers": {"UnityPoolTierList": [{"UnityPoolTier": {}}, {"UnityPoolTier": {}}, {"UnityPoolTier": {}}]}})
+ elif response_type == 'replication_session':
+ return MockSDKObject({"current_transfer_est_remain_time": 0, "daily_snap_replication_policy": {},
+ "dst_resource_id": "dest_id_1", "dst_status": "ReplicationSessionStatusEnum.OK", "existed": True,
+ "health": {}, "hourly_snap_replication_policy": {},
+ "id": "111111_XXX1111111_0000_1111111_XXX111111111_0000",
+ "last_sync_time": "2022-02-17 09: 50: 54+00: 00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.LOOPBACK",
+ "max_time_out_of_sync": 60, "members": {}, "name": "rep_session_1",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK", "remote_system": {},
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.CONSISTENCYGROUP",
+ "src_resource_id": "src_id_1",
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": "ReplicationOpStatusEnum.AUTO_SYNC_CONFIGURED",
+ "sync_progress": 0, "sync_state": "ReplicationSessionSyncStateEnum.IDLE"})
+ elif response_type == 'destination_cg_name_validation':
+ return 'destination_cg_name value should be in range of 1 to 95'
+ elif response_type == 'enable_cg_exception':
+ return 'Enabling replication to the consistency group lun_test_cg_source_12 failed with error '
+ elif response_type == 'disable_cg_exception':
+ return 'Disabling replication to the consistency group lun_test_cg_source_12 failed with error '
+
+ @staticmethod
+ def get_remote_system_conn_response():
+ conn = MockConsistenyGroupApi.get_cg_replication_dependent_response("remote_system")[0]
+ conn.get_pool = MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system_pool_object'))
+ return conn
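+
+# Note: get_remote_system_conn_response() above stitches the canned
+# remote-system object to a pool lookup, so code paths that call
+# conn.get_pool(...) while enabling replication receive the mocked pool.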
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py
new file mode 100644
index 000000000..d855815a2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_filesystem_api.py
@@ -0,0 +1,68 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the FileSystem module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockFileSystemApi:
+ @staticmethod
+ def get_file_system_response():
+ filesystem_response = {"UnityFileSystem": {
+ "access_policy": "AccessPolicyEnum.NATIVE",
+ "cifs_notify_on_change_dir_depth": 512,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": True,
+ "size_total": 5,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "id": "fs_208",
+ "replication_type": "Remote"}}
+ filesystem_response['storage_resource'] = MockSDKObject({'replication_type': None})
+ return filesystem_response
+
+    @staticmethod
+    def get_replication_params(is_valid=True):
+        rpo = 60
+        if not is_valid:
+            rpo = 2
+            return {'replication_params': {
+                'replication_name': None,
+                'new_replication_name': None,
+                'replication_mode': 'asynchronous',
+                'replication_type': 'local',
+                'rpo': rpo,
+                'remote_system': None,
+                'destination_pool_name': 'pool_test_name_1',
+                'destination_pool_id': None},
+                'replication_state': 'enable',
+                'state': 'present'
+            }
+        else:
+            return {'replication_params': {
+                'replication_name': None,
+                'replication_mode': 'asynchronous',
+                'new_replication_name': None,
+                'replication_type': 'remote',
+                'rpo': rpo,
+                'remote_system': {
+                    'remote_system_host': '1.1.1.1',
+                    'remote_system_verifycert': False,
+                    'remote_system_username': 'username',
+                    'remote_system_password': 'password',
+                    'remote_system_port': 1
+                },
+                'destination_pool_name': 'pool_test_name_1',
+                'destination_pool_id': None},
+                'replication_state': 'enable',
+                'state': 'present'
+            }
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py
new file mode 100644
index 000000000..4e93b6285
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_host_api.py
@@ -0,0 +1,154 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the host module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockHostApi:
+ HOST_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'host_id': None,
+ 'host_name': None,
+ 'host_os': None,
+ 'description': None,
+ 'initiators': None,
+ 'initiator_state': None,
+ 'new_host_name': None,
+ 'network_address': None,
+ 'network_address_state': None,
+ 'state': None
+ }
+
+ IP_ADDRESS_MOCK_VALUE = '***.***.*.*'
+    IQN_INITIATOR_MOCK_VALUE = 'iqn.1111-11.com.vmware:host_name_1-111111f'
+
+ @staticmethod
+ def get_host_count_response():
+ return [{"auto_manage_type": "HostManageEnum.OTHERS", "description": "", "existed": True,
+ "fc_host_initiators": {"UnityHostInitiatorList": [{"UnityHostInitiator": {}}]}, "health":
+ {"UnityHealth": {}}, "host_ip_ports": {"UnityHostIpPortList": [{"UnityHostIpPort": {}},
+ {"UnityHostIpPort": {}}]}, "host_pushed_uuid": "1-1-1-1-1",
+ "id": "Host_id_1", "iscsi_host_initiators": {"UnityHostInitiatorList": [{"UnityHostInitiator": {}}]},
+ "name": "host_name_1", "os_type": "XXXXXXXX", "type": "HostTypeEnum.HOST_AUTO"}]
+
+ @staticmethod
+ def get_host_initiators_list():
+ return ['1:1:1:1:1:1:1:1:1', MockHostApi.IQN_INITIATOR_MOCK_VALUE]
+
+ @staticmethod
+ def get_host_details_response(response_type):
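+        # 'api' mimics the raw SDK host object, 'module' is the dict the
+        # Ansible module is expected to return after processing it, and
+        # 'error' is the message used by negative tests.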
+ if response_type == 'api':
+ return {'auto_manage_type': 'HostManageEnum.OTHERS', 'datastores': None, 'description': '',
+ 'fc_host_initiators': [MockSDKObject({'chap_user_name': None,
+ 'health': {'UnityHealth': {}}, 'id': 'HostInitiator_fc_1',
+ 'initiator_id': '1:1:1:1:1:1:1:1:1',
+ 'initiator_source_type': 'HostInitiatorSourceTypeEnum.OPEN_NATIVE', 'is_bound': None,
+ 'is_chap_secret_enabled': False,
+ 'is_ignored': False, 'iscsi_type': None,
+ 'node_wwn': '11:12:13:14:**:**:**:**',
+ 'parent_host': {'UnityHost': {'id': 'Host_id_1'}},
+ 'paths': [MockSDKObject({'id': 'HostInitiator_mock_6', 'is_logged_in': True}),
+ MockSDKObject({'id': 'HostInitiator_mock_5', 'is_logged_in': True}),
+ MockSDKObject({'id': 'HostInitiator_mock_4', 'is_logged_in': True}),
+ MockSDKObject({'id': 'HostInitiator_mock_3', 'is_logged_in': True})],
+ 'port_wwn': '10:10:10:10:10:10:10:10:10', 'source_type': None,
+ 'type': 'HostInitiatorTypeEnum.FC', 'existed': True})],
+ 'host_container': None, 'host_ip_ports': [MockSDKObject({'address': MockHostApi.IP_ADDRESS_MOCK_VALUE,
+ 'host': None, 'id': 'HostNetworkAddress_1',
+ 'is_ignored': None, 'name': None, 'netmask': None, 'type': None,
+ 'v6_prefix_length': None, 'existed': True}),
+ MockSDKObject({'address': 'host_name_1', 'host': None, 'id': 'HostNetworkAddress_2',
+ 'is_ignored': None, 'name': None, 'netmask': None, 'type': None,
+ 'v6_prefix_length': None, 'existed': True})],
+ 'host_luns': MockSDKObject({'lun':
+ [MockSDKObject({'hlu': 1, 'host': None, 'id': 'host_a', 'name': 'host_name_a', 'is_read_only': None,
+ 'lun': {'UnityLun': {}}, 'snap': None, 'type': None, 'existed': True}),
+ MockSDKObject({'hlu': 0, 'host': None, 'id': 'host_b', 'name': 'host_name_b', 'is_read_only': None,
+ 'lun': {'UnityLun': {}}, 'snap': None, 'type': None, 'existed': True})]}),
+ 'host_polled_uuid': None, 'host_pushed_uuid': '1-1-1-1-1',
+ 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_id_1',
+ 'iscsi_host_initiators': [MockSDKObject({'chap_user_name': None, 'health': {'UnityHealth': {}}, 'id': 'HostInitiator_iscsi_1',
+ 'initiator_id': MockHostApi.IQN_INITIATOR_MOCK_VALUE,
+ 'initiator_source_type': 'HostInitiatorSourceTypeEnum.OPEN_NATIVE', 'is_bound': True,
+ 'is_chap_secret_enabled': False, 'is_ignored': False,
+ 'iscsi_type': 'HostInitiatorIscsiTypeEnum.SOFTWARE', 'node_wwn': None,
+ 'parent_host': {'UnityHost': {'id': 'Host_id_1'}},
+ 'paths': [MockSDKObject({'id': 'HostInitiator_mock_1', 'is_logged_in': True}),
+ MockSDKObject({'id': 'HostInitiator_mock_2', 'is_logged_in': True})],
+ 'port_wwn': None, 'source_type': None, 'type': 'HostInitiatorTypeEnum.ISCSI',
+ 'existed': True})],
+ 'last_poll_time': None, 'name': 'host_name_1',
+ 'os_type': 'XXXXXXXX', 'registration_type': None, 'storage_resources': None, 'tenant': None,
+ 'type': 'HostTypeEnum.HOST_AUTO',
+ 'vms': None, 'existed': True, 'health': {'UnityHealth': {}}}
+ elif response_type == 'module':
+ return {'changed': False,
+ 'host_details': {'auto_manage_type': 'HostManageEnum.OTHERS', 'datastores': None, 'description': '',
+ 'fc_host_initiators': [{'id': 'HostInitiator_fc_1',
+ 'name': '1:1:1:1:1:1:1:1:1',
+ 'paths': [{'id': 'HostInitiator_mock_6',
+ 'is_logged_in': True},
+ {'id': 'HostInitiator_mock_5',
+ 'is_logged_in': True},
+ {'id': 'HostInitiator_mock_4',
+ 'is_logged_in': True},
+ {'id': 'HostInitiator_mock_3',
+ 'is_logged_in': True}]}],
+ 'health': {'UnityHealth': {}},
+ 'host_container': None,
+ 'host_luns': [{'id': "host_a", 'name': 'host_name_a'}, {'id': 'host_b', 'name': 'host_name_b'}],
+ 'host_polled_uuid': None, 'host_pushed_uuid': '1-1-1-1-1',
+ 'host_uuid': None, 'host_v_vol_datastore': None, 'id': 'Host_id_1',
+ 'iscsi_host_initiators': [{'id': 'HostInitiator_iscsi_1',
+ 'name': MockHostApi.IQN_INITIATOR_MOCK_VALUE,
+ 'paths': [{'id': 'HostInitiator_mock_1',
+ 'is_logged_in': True},
+ {'id': 'HostInitiator_mock_2',
+ 'is_logged_in': True}]}],
+ 'last_poll_time': None, 'name': 'host_name_1', 'os_type': 'XXXXXXXX',
+ 'registration_type': None, 'storage_resources': None, 'tenant': None,
+ 'type': 'HostTypeEnum.HOST_AUTO', 'vms': None, 'existed': True,
+ 'network_addresses': [MockHostApi.IP_ADDRESS_MOCK_VALUE, 'host_name_1']}}
+ elif response_type == 'error':
+ return "Incorrect username or password provided."
+
+ @staticmethod
+ def get_host_details_after_network_address_addition(response_type):
+ if response_type == 'api':
+ host_object = MockHostApi.get_host_details_response('api')
+ host_object['host_ip_ports'].append(MockSDKObject({'address': 'net_add_1', 'host': None, 'id': 'HostNetworkAddress_3',
+ 'is_ignored': None, 'name': None, 'netmask': None, 'type': None,
+ 'v6_prefix_length': None, 'existed': True}))
+ return host_object
+ elif response_type == 'module':
+ host_module_response = MockHostApi.get_host_details_response('module')
+ host_module_response['host_details']['network_addresses'].append('net_add_1')
+ host_module_response['changed'] = True
+ return host_module_response
+ elif response_type == 'invalid_address':
+ return 'Please enter valid IPV4 address or host name for network address'
+
+ @staticmethod
+ def get_host_details_after_network_address_removal(response_type):
+ if response_type == 'api':
+ host_object = MockHostApi.get_host_details_response('api')
+ host_object['host_ip_ports'] = [MockSDKObject({'address': MockHostApi.IP_ADDRESS_MOCK_VALUE, 'host': None, 'id': 'HostNetworkAddress_1',
+ 'is_ignored': None, 'name': None, 'netmask': None, 'type': None,
+ 'v6_prefix_length': None, 'existed': True})]
+ return host_object
+ elif response_type == 'module':
+ host_module_response = MockHostApi.get_host_details_response('module')
+ host_module_response['host_details']['network_addresses'].remove('host_name_1')
+ host_module_response['changed'] = True
+ return host_module_response
+ elif response_type == 'invalid_IPV4':
+ return 'Please enter valid IPV4 address for network address'
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py
new file mode 100644
index 000000000..6bd53ea9b
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py
@@ -0,0 +1,122 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the interface module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from unittest.mock import MagicMock
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from copy import deepcopy
+
+
+class MockInterfaceApi:
+ INTERFACE_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'nas_server_id': None,
+ 'nas_server_name': None,
+ 'ethernet_port_name': None,
+ 'ethernet_port_id': None,
+ 'role': None,
+ 'interface_ip': None,
+ 'netmask': None,
+ 'prefix_length': None,
+ 'gateway': None,
+ 'vlan_id': None,
+ 'state': None
+ }
+ ETHERNET_PORT_NAME = "Card Ethernet Port"
+ NETMASK_DUMMY = "255.xx.xx.xx"
+ GATEWAY_DUMMY = "10.***.**.1"
+ INTERFACE_DUMMY = "10.***.**.**"
+
+ NAS_SERVER_OBJECT = \
+ MockSDKObject({'allow_unmapped_user': None, 'cifs_server': {'UnityCifsServerList': [{'UnityCifsServer': {'id': 'cifs_id_0'}}]},
+ 'current_sp': {'UnityStorageProcessor': {'id': 'abc'}},
+ 'current_unix_directory_service': 'NasServerUnixDirectoryServiceEnum.NIS', 'default_unix_user': None,
+ 'default_windows_user': None, 'file_dns_server': {'UnityFileDnsServer': {'id': 'dns_id_0'}},
+ 'file_interface': {'UnityFileInterfaceList': [{'UnityFileInterface': {'id': 'file_interface_id_0'}}]},
+ 'filesystems': {'UnityFileSystemList': [{'UnityFileSystem': {'id': 'fs_id_0'}}]},
+ 'home_sp': {'UnityStorageProcessor': {'id': 'abd'}},
+ 'id': 'nas_id_00', 'is_backup_only': False, 'is_multi_protocol_enabled': False,
+ 'is_packet_reflect_enabled': False, 'is_replication_destination': False,
+ 'is_replication_enabled': True, 'is_windows_to_unix_username_mapping_enabled': None,
+ 'name': 'dummy_nas', 'pool': {'UnityPool': {'id': 'pool_id_0'}},
+ 'preferred_interface_settings': {'UnityPreferredInterfaceSettings': {'id': 'preferred_interface_0'}},
+ 'replication_type': 'ReplicationTypeEnum.MIXED',
+ 'tenant': None, 'virus_checker': {'UnityVirusChecker': {'id': 'cava_id_0'}},
+ 'existed': True})
+
+ INTERFACE_OBJECT = \
+ MockSDKObject({"existed": True,
+ "gateway": GATEWAY_DUMMY,
+ "id": "file_interface_id_0",
+ "ip_address": INTERFACE_DUMMY,
+ "ip_port": {"UnityIpPort": {"id": "ethernet_port_id_0"}},
+ "ip_protocol_version": "IpProtocolVersionEnum.IPv4",
+ "is_disabled": False, "is_preferred": True,
+ "mac_address": "AA:AA:AA:**:**:**",
+ "name": "dummy_if_name",
+ "nas_server": {"UnityNasServer": {"id": "nas_id_00"}},
+ "netmask": NETMASK_DUMMY,
+ "role": "FileInterfaceRoleEnum.BACKUP",
+ "vlan_id": 324})
+
+ FILE_INTERFACE_ROLE_ENUM_DUMMY = {
+ 'PRODUCTION': (0, 'Production'),
+ 'BACKUP': (1, 'Backup')
+ }
+
+ @staticmethod
+ def get_nas_without_interface():
+ nas_object = deepcopy(MockInterfaceApi.NAS_SERVER_OBJECT)
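+        # deepcopy keeps the shared NAS_SERVER_OBJECT template pristine;
+        # each helper mutates only its own copy.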
+ nas_object.file_interface['UnityFileInterfaceList'] = []
+ return nas_object
+
+ @staticmethod
+ def get_nas_server_obj_existed_false():
+ nas_object = deepcopy(MockInterfaceApi.NAS_SERVER_OBJECT)
+ nas_object.existed = False
+ return nas_object
+
+ @staticmethod
+ def get_interface_exception_response(response_type):
+ if response_type == 'nas_server_id_exception':
+ return "Failed to get details of NAS server: dummy_nas with error: "
+ elif response_type == 'interface_exception':
+ return "Getting Interface details failed with error "
+ elif response_type == 'add_interface_exception':
+ return "Failed to add interface to NAS Server with error: "
+ elif response_type == 'delete_interface_exception':
+ return "Failed to delete interface with error: "
+
+ @staticmethod
+ def get_interface_error_response(response_type):
+ if response_type == 'invalid_ethernet_port_name':
+ return "Please provide valid value for: ethernet_port_name"
+ elif response_type == 'invalid_vlan_id':
+ return "vlan_id should be in the range of 3 to 4094"
+ elif response_type == 'invalid_interface_ip':
+ return 'The value for interface ip is invalid'
+ elif response_type == 'invalid_gateway':
+ return "The value for gateway is invalid"
+ elif response_type == 'invalid_netmask':
+ return 'Invalid IPV4 address specified for netmask'
+ elif response_type == 'modify_failure':
+ return "Modification of Interfaces for NAS server is not supported through Ansible module"
+ elif response_type == 'no_role':
+ return "Role is a mandatory parameter for adding interface to NAS Server."
+ elif response_type == 'no_ethernet':
+ return "ethernet_port_name/ethernet_port_id is mandatory parameter for adding interface to NAS Server."
+
+ @staticmethod
+ def get_nas_server_obj_errors(response_type):
+ if response_type == 'existed_false':
+ return "NAS server with id nas_id_00 does not exist"
+ elif response_type == 'exception':
+ return "Failed to get details of NAS server with error: "
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py
new file mode 100644
index 000000000..cb11e6d8e
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nasserver_api.py
@@ -0,0 +1,64 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the NASServer module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockNASServerApi:
+ @staticmethod
+ def get_nas_server_response():
+ return ({"access_policy": "AccessPolicyEnum.NATIVE",
+ "cifs_notify_on_change_dir_depth": 512,
+ "data_reduction_percent": 0,
+ "data_reduction_ratio": 1.0,
+ "data_reduction_size_saved": 0,
+ "description": "",
+ "existed": True,
+ "size_total": 5,
+ "id": "nas0",
+ "name": "nas0",
+ "replication_type": "Remote"})
+
+    @staticmethod
+    def get_replication_params(is_valid=True):
+        rpo = 60
+        if not is_valid:
+            rpo = 2
+            return {'replication_params': {
+                'replication_name': None,
+                'new_replication_name': None,
+                'replication_type': 'local',
+                'replication_mode': 'asynchronous',
+                'rpo': rpo,
+                'remote_system': None,
+                'destination_nas_server_name': None,
+                'destination_pool_name': 'pool_test_name_1',
+                'destination_pool_id': None},
+                'replication_state': 'enable',
+                'state': 'present'
+            }
+        else:
+            return {'replication_params': {
+                'replication_name': None,
+                'replication_mode': 'asynchronous',
+                'new_replication_name': None,
+                'replication_type': 'remote',
+                'rpo': rpo,
+                'remote_system': {
+                    'remote_system_host': '1.1.1.1',
+                    'remote_system_verifycert': False,
+                    'remote_system_username': 'username',
+                    'remote_system_password': 'password',
+                    'remote_system_port': 1
+                },
+                'destination_nas_server_name': None,
+                'destination_pool_name': 'pool_test_name_1',
+                'destination_pool_id': None},
+                'replication_state': 'enable',
+                'state': 'present'
+            }
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py
new file mode 100644
index 000000000..bd933f7ce
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfs_api.py
@@ -0,0 +1,187 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the NFS module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from mock.mock import MagicMock
+
+
+class MockNfsApi:
+ NFS_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'description': None,
+ 'anonymous_uid': None,
+ 'anonymous_gid': None,
+ 'min_security': None,
+ 'default_access': None,
+ 'nas_server_id': None,
+ 'nas_server_name': None,
+ 'nfs_export_id': None,
+ 'nfs_export_name': None,
+ 'snapshot_name': None,
+ 'snapshot_id': None,
+ 'filesystem_name': None,
+ 'filesystem_id': None,
+ 'host_state': None,
+ 'adv_host_mgmt_enabled': None,
+ 'no_access_hosts': None,
+ 'read_only_hosts': None,
+ 'read_only_root_hosts': None,
+ 'read_write_hosts': None,
+ 'read_write_root_hosts': None,
+ 'path': None,
+ 'state': None
+ }
+
+ DUMMY_DOMAIN_VALUE = "google.com"
+ DUMMY_SUBNET_VALUE = "**.***.2.2/10"
+
+ FILESYSTEM_OBJECT = MockSDKObject({"access_policy": "AccessPolicyEnum.UNIX", "cifs_notify_on_change_dir_depth": 512,
+ "data_reduction_percent": 0, "data_reduction_ratio": 1.0, "data_reduction_size_saved": 0,
+ "description": "", "existed": True,
+ "folder_rename_policy": "FSRenamePolicyEnum.SMB_RENAME_FORBIDDEN",
+ "format": "FSFormatEnum.UFS64", "host_io_size": "HostIOSizeEnum.GENERAL_16K",
+ "id": "fs_id_1", "is_advanced_dedup_enabled": False, "is_cifs_notify_on_access_enabled": False,
+ "is_cifs_notify_on_write_enabled": False, "is_cifs_op_locks_enabled": True,
+ "is_cifs_sync_writes_enabled": False, "is_data_reduction_enabled": False, "is_read_only": False,
+ "is_smbca": False, "is_thin_enabled": True, "locking_policy": "FSLockingPolicyEnum.MANDATORY",
+ "min_size_allocated": 0, "name": "fs_dummy_name", "nas_server": {"id": "nas_id_1"},
+ "nfs_share": [{"id": "NFSShare_id_1"}], "pool": {"id": "pool_id_1"},
+ "pool_full_policy": "ResourcePoolFullPolicyEnum.FAIL_WRITES", "snap_count": 0, "snaps_size": 0,
+ "snaps_size_allocated": 0, "storage_resource": {"id": "stg_id_1"},
+ "supported_protocols": "FSSupportedProtocolEnum.NFS",
+ "tiering_policy": "TieringPolicyEnum.AUTOTIER_HIGH",
+ "type": "FilesystemTypeEnum.FILESYSTEM"})
+
+ NFS_SHARE_OBJECT = MockSDKObject({"anonymous_gid": 4294967294, "anonymous_uid": 4294967294,
+ "default_access": "NFSShareDefaultAccessEnum.NO_ACCESS", "description": "", "existed": True,
+ "export_option": 1, "export_paths": ["**.***.**.**:/nfsshare_dummy_name"],
+ "filesystem": MockSDKObject({"id": "fs_id_1", "name": "fs_name1", "nas_server": "not_none"}),
+ "id": "NFSShare_id_1",
+ "min_security": "NFSShareSecurityEnum.SYS",
+ "modification_time": "2022-04-24 17:07:57.749000+00:00",
+ "name": "nfsshare_dummy_name",
+ "no_access_hosts": None,
+ "no_access_hosts_string": None,
+ "read_only_hosts": None,
+ "read_only_hosts_string": None,
+ "read_only_root_access_hosts": None,
+ "read_only_root_hosts_string": None,
+ "read_write_hosts": None,
+ "read_write_hosts_string": None,
+ "read_write_root_hosts_string": None,
+ "root_access_hosts": None,
+ "path": "/",
+ "role": "NFSShareRoleEnum.PRODUCTION",
+ "type": "NFSTypeEnum.NFS_SHARE"})
+
+ NFS_SHARE_DISPLAY_ATTR = {'anonymous_gid': 4294967294, 'anonymous_uid': 4294967294, 'creation_time': '2022-03-09 15:05:34.720000+00:00',
+ 'default_access': 'NFSShareDefaultAccessEnum.NO_ACCESS', 'description': '', 'export_option': 1,
+ 'export_paths': ['**.***.**.**:/nfsshare_dummy_name'],
+ 'filesystem': {'UnityFileSystem': {'id': 'fs_id_1', 'name': 'fs_name1'}}, 'host_accesses': None,
+ 'id': 'NFSShare_id_1', 'is_read_only': None, 'min_security': 'NFSShareSecurityEnum.SYS',
+ 'modification_time': '2022-04-24 17:07:57.749000+00:00', 'name': 'nfsshare_dummy_name',
+ 'nfs_owner_username': None, 'no_access_hosts': None,
+ 'no_access_hosts_string': None,
+ 'path': '/', 'read_only_hosts': None, 'read_only_hosts_string': '', 'read_only_root_access_hosts': None,
+ 'read_only_root_hosts_string': '', 'read_write_hosts': None, 'read_write_hosts_string': '',
+ 'read_write_root_hosts_string': '', 'role': 'NFSShareRoleEnum.PRODUCTION', 'root_access_hosts': None,
+ 'snap': None, 'type': 'NFSTypeEnum.NFS_SHARE', 'existed': True,
+ 'nas_server': {'UnityNasServer': {'id': 'nas_id_1', 'name': 'lglad068'}}}
+
+ @staticmethod
+ def get_nfs_share_object_on_host_access(action, advhostmgmt):
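+        # Returns the share object as it looks *before* the requested
+        # host-access change: 'add' yields a share without the hosts (so
+        # there is something to add), 'remove' yields one with the hosts
+        # already present.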
+ if advhostmgmt:
+ if action == 'add':
+ nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT
+ return nfs_share_object
+ elif action == 'remove':
+ nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT
+ nfs_share_object.no_access_hosts = {
+ 'UnityHostList': [
+ {
+ 'UnityHost': {
+ 'id': 'Host_1389'
+ }
+ },
+ {
+ 'UnityHost': {
+ 'id': 'Host_1330'
+ }
+ }
+ ]
+ }
+ return nfs_share_object
+        else:
+            if action == 'add':
+                nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT
+                nfs_share_object.read_only_root_hosts_string = ''
+                return nfs_share_object
+            elif action == 'remove':
+                nfs_share_object = MockNfsApi.NFS_SHARE_OBJECT
+                nfs_share_object.read_only_root_hosts_string = '*.google.com,**.***.0.0/255.***.*.*'
+                return nfs_share_object
+
+ @staticmethod
+ def get_nfs_share_display_attr_on_host_access(action, advhostmgmt):
+ if advhostmgmt:
+ if action == 'add':
+ nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR
+ nfs_share_display_attr['no_access_hosts'] = {
+ 'UnityHostList': [
+ {
+ 'UnityHost': {
+ 'id': 'Host_1389'
+ }
+ },
+ {
+ 'UnityHost': {
+ 'id': 'Host_1330'
+ }
+ }
+ ]
+ }
+ return nfs_share_display_attr
+ elif action == 'remove':
+ nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR
+ nfs_share_display_attr['no_access_hosts'] = None
+ return nfs_share_display_attr
+ else:
+ if action == 'add':
+ nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR
+ nfs_share_display_attr['read_only_root_hosts_string'] = '*.google.com,**.***.0.0/255.***.*.*'
+ return nfs_share_display_attr
+ elif action == 'remove':
+ nfs_share_display_attr = MockNfsApi.NFS_SHARE_DISPLAY_ATTR
+ nfs_share_display_attr['read_only_root_hosts_string'] = ''
+ return nfs_share_display_attr
+
+ @staticmethod
+ def get_host_obj(id):
+ if id == 1:
+ host_1 = MagicMock()
+ host_1.id = 'Host_1389'
+ host_1.name = 'host_1'
+ return host_1
+ elif id == 2:
+ host_2 = MagicMock()
+ host_2.id = 'Host_1330'
+ host_2.name = 'host_2'
+ return host_2
+
+ @staticmethod
+ def host_access_negative_response(response_type):
+ if response_type == 'subnet_validation':
+ return "Subnet should be in format 'IP address/netmask' or 'IP address/prefix length'"
+ elif response_type == 'advhostmngmnt_field_validation':
+ return "'host_state' and 'adv_host_mgmt_enabled' is required along with: read_only_root_hosts"
+ elif response_type == 'modify_exception':
+ return 'Failed to modify nfs error: '
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py
new file mode 100644
index 000000000..1254f0035
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py
@@ -0,0 +1,259 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock API response for unit tests of the NFS server module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from unittest.mock import MagicMock
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockNFSServerApi:
+ NFS_SERVER_MODULE_ARGS = {
+ 'nas_server_id': None,
+ 'nas_server_name': None,
+ 'host_name': None,
+ 'is_secure_enabled': None,
+ 'kerberos_domain_controller_type': None,
+ 'kerberos_domain_controller_username': None,
+ 'kerberos_domain_controller_password': None,
+ 'is_extended_credentials_enabled': None,
+ 'nfs_v4_enabled': None,
+ 'nfs_server_id': None,
+ 'interfaces': None,
+ 'remove_spn_from_kerberos': None,
+ 'state': None
+ }
+
+ NAS_SERVER_OBJ = MockSDKObject({"id": "nas_10"})
+
+ @staticmethod
+ def get_nfs_server_details_method_response():
+ return {
+ "credentials_cache_ttl": "0:15:00",
+ "existed": True,
+ "file_interfaces": {
+ "UnityFileInterfaceList": [{
+ "UnityFileInterface": {
+ "hash": 1111111111111,
+ "id": "if_3"
+ }
+ }]
+ },
+ "hash": 1111111111111,
+ "id": "nfs_95",
+ "is_extended_credentials_enabled": False,
+ "is_secure_enabled": False,
+ "nas_server": MockNFSServerApi.NAS_SERVER_OBJ,
+ "nfs_v4_enabled": True,
+ 'servicee_principal_name': None
+ }
+
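+    # Note: get_nfs_server_details_method_response() above returns a plain
+    # dict (the module's displayed details), while get_nfs_server_details()
+    # below wraps the same payload in a one-element MockSDKObject list,
+    # mimicking an SDK query result.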
+    @staticmethod
+    def get_nfs_server_details():
+        return [MockSDKObject(MockNFSServerApi.get_nfs_server_details_method_response())]
+
+ @staticmethod
+ def validate_params_exception():
+ return "Please provide valid value for:"
+
+ @staticmethod
+ def create_nfs_server_without_nas_server_id():
+ return "Please provide nas server id/name to create NFS server."
+
+ @staticmethod
+ def get_nas_server_id_api_exception():
+ return "Failed to get details of NAS server:"
+
+ @staticmethod
+ def create_nfs_server_without_is_secure_enabled():
+ return "For create NFS Server nfs_v4_enabled and is_secure_enabled should be true."
+
+ @staticmethod
+ def create_nfs_server_with_api_exception():
+ return "Failed to create NFS server with on NAS Server"
+
+ @staticmethod
+ def get_nfs_server_api_exception():
+ return "Incorrect username or password provided."
+
+ @staticmethod
+ def get_nfs_server_api_exception_1():
+ return "Failed to get details of NFS Server with error"
+
+ @staticmethod
+ def delete_exception():
+ return "Failed to delete NFS server:"
+
+ @staticmethod
+ def modify_error_msg():
+ return "Modification of NFS Server parameters is not supported through Ansible module"
+
+ @staticmethod
+ def get_nas_server_details():
+ return {
+ "UnityNasServer": {
+ "cifs_server": {
+ "UnityCifsServerList": [{
+ "UnityCifsServer": {
+ "hash": 1111111111111,
+ "id": "cifs_60"
+ }
+ }]
+ },
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 1111111111111,
+ "id": "spa"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NONE",
+ "existed": True,
+ "file_dns_server": {
+ "UnityFileDnsServer": {
+ "hash": 1111111111111,
+ "id": "dns_11"
+ }
+ },
+ "file_interface": {
+ "UnityFileInterfaceList": [{
+ "UnityFileInterface": {
+ "hash": -1111111111111,
+ "id": "if_43"
+ }
+ }]
+ },
+ "hash": -1111111111111,
+ "health": {
+ "UnityHealth": {
+ "hash": 1111111111111
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": -1111111111111,
+ "id": "spa"
+ }
+ },
+ "id": "nas_18",
+ "is_backup_only": False,
+ "is_multi_protocol_enabled": False,
+ "is_packet_reflect_enabled": False,
+ "is_replication_destination": False,
+ "is_replication_enabled": False,
+ "name": "test_nas1",
+ "pool": {
+ "UnityPool": {
+ "hash": -1111111111111,
+ "id": "pool_7"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": -1111111111111,
+ "id": "preferred_if_16"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.NONE",
+ "size_allocated": 1111111111111,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 1111111111111,
+ "id": "cava_18"
+ }
+ }
+ }
+ }
+
+ @staticmethod
+ def get_nas_server_id():
+ return MockSDKObject({
+ "cifs_server": {
+ "UnityCifsServerList": [{
+ "UnityCifsServer": {
+ "hash": 1111111111111,
+ "id": "cifs_34"
+ }
+ }]
+ },
+ "current_sp": {
+ "UnityStorageProcessor": {
+ "hash": 1111111111111,
+ "id": "spb"
+ }
+ },
+ "current_unix_directory_service": "NasServerUnixDirectoryServiceEnum.NIS",
+ "existed": True,
+ "file_dns_server": {
+ "UnityFileDnsServer": {
+ "hash": 1111111111111,
+ "id": "dns_12"
+ }
+ },
+ "file_interface": {
+ "UnityFileInterfaceList": [{
+ "UnityFileInterface": {
+ "hash": 1111111111111,
+ "id": "if_37"
+ }
+ }]
+ },
+ "hash": 1111111111111,
+ "health": {
+ "UnityHealth": {
+ "hash": 1111111111111
+ }
+ },
+ "home_sp": {
+ "UnityStorageProcessor": {
+ "hash": 1111111111111,
+ "id": "spb"
+ }
+ },
+ "id": "nas_10",
+ "is_backup_only": False,
+ "is_multi_protocol_enabled": False,
+ "is_packet_reflect_enabled": False,
+ "is_replication_destination": False,
+ "is_replication_enabled": True,
+ "name": "dummy_nas",
+ "pool": {
+ "UnityPool": {
+ "hash": 1111111111111,
+ "id": "pool_7"
+ }
+ },
+ "preferred_interface_settings": {
+ "UnityPreferredInterfaceSettings": {
+ "hash": 1111111111111,
+ "id": "preferred_if_10"
+ }
+ },
+ "replication_type": "ReplicationTypeEnum.REMOTE",
+ "size_allocated": 1111111111111,
+ "virus_checker": {
+ "UnityVirusChecker": {
+ "hash": 1111111111111,
+ "id": "cava_10"}}})
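+
+# Minimal usage sketch (illustrative only; ``nfsserver_module_mock`` is a
+# hypothetical fixture): a unit test wires these canned responses into the
+# mocked Unisphere connection and asserts on the module's exit payload.
+#
+#   from unittest.mock import MagicMock
+#   nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(
+#       return_value=MockNFSServerApi.get_nfs_server_details())
+#   expected = MockNFSServerApi.get_nfs_server_details_method_response()
+#   assert expected['id'] == 'nfs_95'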
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py
new file mode 100644
index 000000000..2556870ba
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_sdk_response.py
@@ -0,0 +1,32 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock SDKResponse for Unit tests for Unity modules"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockSDKObject:
+ def __init__(self, data):
+ self.skip_list = ['skip_list']
+ for key, value in data.items():
+ setattr(self, key, value)
+
+ def add_to_skip_list(self, key):
+ self.skip_list.append(key)
+
+ def _get_properties(self):
+ data = {}
+ for attr, value in self.__dict__.items():
+ if attr not in self.skip_list:
+ data[attr] = value
+ return data
+
+ def get_id(self):
+ return "res_0"
+
+ def name(self):
+ return "res_0"
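+
+# Minimal behaviour sketch (runs as-is): attributes come from the seed dict,
+# and anything registered via add_to_skip_list() is excluded from
+# _get_properties(), which is how tests hide stubbed helper methods from
+# property comparisons.
+#
+#   obj = MockSDKObject({'id': 'nas_10'})
+#   obj.helper = 'not a real SDK property'
+#   obj.add_to_skip_list('helper')
+#   assert obj._get_properties() == {'id': 'nas_10'}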
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py
new file mode 100644
index 000000000..1ec9fcadd
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_storagepool_api.py
@@ -0,0 +1,168 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of storagepool module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+
+class MockStoragePoolApi:
+ STORAGE_POOL_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'pool_name': None,
+ 'pool_id': None,
+ 'new_pool_name': None,
+ 'pool_description': None,
+ 'fast_cache': None,
+ 'fast_vp': None,
+ 'raid_groups': {},
+ 'state': None
+ }
+ RAID_TYPE_5 = 'RaidTypeEnum.RAID5'
+
+ @staticmethod
+ def get_pool_details_response(response_type):
+ if response_type == 'get_pool':
+            return {'alert_threshold': 84, 'creation_time': '2021-11-11 11:11:11+00:00',
+                    'description': '', 'harvest_state': 'UsageHarvestStateEnum.IDLE',
+                    'health': {'UnityHealth': {}}, 'id': 'pool_mock_1',
+                    'is_all_flash': True, 'is_empty': False, 'is_fast_cache_enabled': False,
+                    'is_harvest_enabled': True, 'is_snap_harvest_enabled': False,
+                    'metadata_size_subscribed': 1, 'metadata_size_used': 1,
+                    'name': 'Ansible_Unity_TEST_1', 'object_id': 1,
+                    'pool_fast_vp': {'UnityPoolFastVp': {}},
+                    'pool_space_harvest_high_threshold': 95.0,
+                    'pool_space_harvest_low_threshold': 85.0,
+                    'pool_type': 'StoragePoolTypeEnum.DYNAMIC',
+                    'raid_type': MockStoragePoolApi.RAID_TYPE_5, 'rebalance_progress': None,
+                    'size_free': 1, 'size_subscribed': 1, 'size_total': 1, 'size_used': 1,
+                    'snap_size_subscribed': 1, 'snap_size_used': 1,
+                    'snap_space_harvest_high_threshold': 25.0,
+                    'snap_space_harvest_low_threshold': 20.0,
+                    'tiers': {'UnityPoolTierList': [{'UnityPoolTier': {}}, {'UnityPoolTier': {}}, {'UnityPoolTier': {}}]},
+                    'existed': True}
+ elif response_type == 'pool_object':
+ return {'alert_threshold': 84, 'creation_time': '2021-11-11 11:11:11+00:00',
+ 'description': '', 'harvest_state': 'UsageHarvestStateEnum.IDLE',
+ 'health': {'UnityHealth': {}}, 'id': 'pool_mock_1',
+ 'is_all_flash': True, 'is_empty': False, 'is_fast_cache_enabled': False,
+ 'is_harvest_enabled': True, 'is_snap_harvest_enabled': False,
+ 'metadata_size_subscribed': 1, 'metadata_size_used': 1,
+ 'name': 'Ansible_Unity_TEST_1', 'object_id': 1,
+ 'pool_fast_vp': {'UnityPoolFastVp': {}},
+ 'pool_space_harvest_high_threshold': 95.0,
+ 'pool_space_harvest_low_threshold': 85.0, 'pool_type': 'StoragePoolTypeEnum.DYNAMIC',
+ 'raid_type': MockStoragePoolApi.RAID_TYPE_5, 'rebalance_progress': None, 'size_free': 1,
+ 'size_subscribed': 1, 'size_total': 1, 'size_used': 1,
+ 'snap_size_subscribed': 1, 'snap_size_used': 1,
+ 'snap_space_harvest_high_threshold': 25.0, 'snap_space_harvest_low_threshold': 20.0,
+ 'tiers': MockSDKObject({'disk_count': [5, 0, 0], 'name': ['Extreme Performance', 'Performance', 'Capacity'],
+ 'pool_units': [{'UnityPoolUnitList': [{'UnityPoolUnit': {'id': 'pool_unit_mock_1'}}]}, None, None],
+ 'raid_type': [MockStoragePoolApi.RAID_TYPE_5, 'RaidTypeEnum.NONE', 'RaidTypeEnum.NONE'],
+ 'size_free': [1, 0, 0],
+ 'size_moving_down': [0, 0, 0], 'size_moving_up': [0, 0, 0],
+ 'size_moving_within': [0, 0, 0], 'size_total': [1, 0, 0],
+ 'size_used': [1, 0, 0], 'stripe_width': ['RaidStripeWidthEnum._5', None, None],
+ 'tier_type': ['TierTypeEnum.EXTREME_PERFORMANCE', 'TierTypeEnum.PERFORMANCE', 'TierTypeEnum.CAPACITY'],
+ 'existed': True}),
+ 'existed': True}
+ elif response_type == 'disk_list':
+ return [MockSDKObject({"bus_id": 99, "current_speed": 1, "disk_group": {"UnityDiskGroup": {"id": "disk_mock_1"}},
+ "disk_technology": MockSDKObject({"name": "mock_disk_tech"}), "emc_part_number": "XXXXXXXX",
+ "emc_serial_number": "XXXXXXXX", "existed": True, "health": {"UnityHealth": {}},
+ "id": "disk_mock_2", "is_fast_cache_in_use": False, "is_in_use": True,
+ "is_sed": False, "manufacturer": "mock_disk_manufacturer",
+ "max_speed": 1, "model": "mock_disk_model", "name": "Drive 12",
+ "needs_replacement": False, "pool": MockSDKObject({"id": "pool_5", "name": "Pool_Mock_TEST_2", "UnityPool": {}}),
+ "raw_size": 1, "rpm": 0, "size": 1, "slot_number": 12,
+ "tier_type": MockSDKObject({"name": "EXTREME_PERFORMANCE"}), "vendor_size": 1,
+ "version": "S109", "wwn": "00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}),
+ MockSDKObject({"bus_id": 99, "current_speed": 1,
+ "disk_group": {"UnityDiskGroup": {"id": "disk_mock_1"}},
+ "disk_technology": MockSDKObject({"name": "mock_disk_tech"}), "emc_part_number": "XXXXXXXX",
+ "emc_serial_number": "XXXXXXXX", "existed": True, "health": {"UnityHealth": {}},
+ "id": "mock_disk_id", "is_fast_cache_in_use": False, "is_in_use": True, "is_sed": False,
+ "manufacturer": "mock_disk_manufacturer", "max_speed": 1, "model": "mock_disk_model",
+ "name": "disk_disk_name", "needs_replacement": False,
+ "pool": MockSDKObject({"id": "pool_mock_1", "name": "Ansible_Unity_TEST_1"}),
+ "raw_size": 1, "rpm": 0, "size": 1,
+ "slot_number": 13, "tier_type": MockSDKObject({"name": "EXTREME_PERFORMANCE"}),
+ "vendor_size": 1, "version": "S109",
+ "wwn": "01:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"})]
+ elif response_type == 'module':
+ return {"storage_pool_details":
+ {"alert_threshold": 84, "creation_time": "2021-11-11 11:11:11+00:00", "description": "",
+ "drives": [{"disk_technology": "mock_disk_tech", "id": "mock_disk_id", "name": "disk_disk_name",
+ "size": 1, "tier_type": "EXTREME_PERFORMANCE"}],
+ "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE",
+ "health": {"UnityHealth": {}},
+ "id": "pool_mock_1", "is_all_flash": True, "is_empty": False,
+ "is_fast_cache_enabled": False, "is_fast_vp_enabled": True,
+ "is_harvest_enabled": True, "is_snap_harvest_enabled": False,
+                     "metadata_size_subscribed": 1, "metadata_size_used": 1,
+                     "name": "Ansible_Unity_TEST_1", "object_id": 1,
+ "pool_fast_vp": {"UnityPoolFastVp": {}},
+ "pool_space_harvest_high_threshold": 95.0,
+ "pool_space_harvest_low_threshold": 85.0, "pool_type": "StoragePoolTypeEnum.DYNAMIC",
+ "raid_type": "RaidTypeEnum.RAID5", "rebalance_progress": None, "size_free": 1,
+ "size_free_with_unit": "1.0 B", "size_subscribed": 1, "size_subscribed_with_unit": "1.0 B",
+ "size_total": 1, "size_total_with_unit": "1.0 B", "size_used": 1, "size_used_with_unit": "1.0 B",
+ "snap_size_subscribed": 1, "snap_size_subscribed_with_unit": "1.0 B", "snap_size_used": 1,
+ "snap_size_used_with_unit": "1.0 B", "snap_space_harvest_high_threshold": 25.0, "snap_space_harvest_low_threshold": 20.0,
+ "tiers": {"UnityPoolTierList": [{"disk_count": [5, 0, 0], "existed": True,
+ "name": ["Extreme Performance", "Performance", "Capacity"],
+ "pool_units": [{"UnityPoolUnitList": [{"UnityPoolUnit": {"id": "pool_unit_mock_1"}}]}, None, None],
+ "raid_type": ["RaidTypeEnum.RAID5", "RaidTypeEnum.NONE", "RaidTypeEnum.NONE"],
+ "size_free": [1, 0, 0], "size_moving_down": [0, 0, 0],
+ "size_moving_up": [0, 0, 0],
+ "size_moving_within": [0, 0, 0],
+ "size_total": [1, 0, 0],
+ "size_used": [1, 0, 0],
+ "stripe_width": ["RaidStripeWidthEnum._5", None, None],
+ "tier_type": ["TierTypeEnum.EXTREME_PERFORMANCE", "TierTypeEnum.PERFORMANCE",
+ "TierTypeEnum.CAPACITY"]}]}}}
+ elif response_type == 'error':
+ return 'Get details of storage pool failed with error: '
+
+ @staticmethod
+ def create_pool_response(response_type):
+ if response_type == 'api':
+ return {"storage_pool_details":
+ {"alert_threshold": 50, "creation_time": "2022-03-08 10:51:08+00:00", "description": "Unity test pool.",
+ "drives": [{"disk_technology": "SAS", "id": "disk_id_1", "name": "DPE Drive 1",
+ "size": 1, "tier_type": "PERFORMANCE"},
+ {"disk_technology": "SAS", "id": "disk_id_2", "name": "DPE Drive 2",
+ "size": 1, "tier_type": "PERFORMANCE"},
+ {"disk_technology": "SAS", "id": "disk_id_3", "name": "DPE Drive 3",
+ "size": 1, "tier_type": "PERFORMANCE"}],
+ "existed": True, "harvest_state": "UsageHarvestStateEnum.IDLE",
+ "health": {"UnityHealth": {}},
+ "id": "pool_id_1", "is_all_flash": False, "is_empty": True,
+ "is_fast_cache_enabled": False, "is_fast_vp_enabled": True,
+ "is_harvest_enabled": True, "is_snap_harvest_enabled": True,
+                     "metadata_size_subscribed": 0, "metadata_size_used": 0,
+                     "name": "Mock_Test", "object_id": 123,
+ "pool_fast_vp": {"UnityPoolFastVp": {}},
+ "pool_space_harvest_high_threshold": 59.0,
+ "pool_space_harvest_low_threshold": 40.0, "pool_type": "StoragePoolTypeEnum.DYNAMIC",
+ "raid_type": "RaidTypeEnum.RAID10", "rebalance_progress": None, "size_free": 1,
+ "size_free_with_unit": "1 GB", "size_subscribed": 0, "size_subscribed_with_unit": "0B",
+ "size_total": 1, "size_total_with_unit": "1 GB", "size_used": 0, "size_used_with_unit": "0B",
+ "snap_size_subscribed": 0, "snap_size_subscribed_with_unit": "0B", "snap_size_used": 0,
+ "snap_size_used_with_unit": "0B", "snap_space_harvest_high_threshold": 80.0, "snap_space_harvest_low_threshold": 60.0,
+ "tiers": {"UnityPoolTierList": [{"disk_count": [0, 3, 0], "existed": True,
+ "name": ["Extreme Performance", "Performance", "Capacity"],
+ "pool_units": [{"UnityPoolUnitList": [{"UnityPoolUnit": {"id": "rg_id_1"}},
+ {"UnityPoolUnit": {"id": "rg_id_2"}}]}, None],
+ "raid_type": ["RaidTypeEnum.NONE", "RaidTypeEnum.RAID10", "RaidTypeEnum.NONE"],
+ "size_free": [0, 1, 0], "size_moving_down": [0, 0, 0],
+ "size_moving_up": [0, 0, 0],
+ "size_moving_within": [0, 0, 0],
+ "size_total": [0, 1, 0],
+ "size_used": [0, 0, 0],
+ "stripe_width": [None, "RaidStripeWidthEnum._2", None],
+ "tier_type": ["TierTypeEnum.EXTREME_PERFORMANCE", "TierTypeEnum.PERFORMANCE",
+ "TierTypeEnum.CAPACITY"]}]}}}
+ elif response_type == 'error':
+ return 'Failed to create storage pool with error: '
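+
+# Minimal usage sketch (illustrative only; ``storagepool_module_mock`` is a
+# hypothetical fixture):
+#
+#   from unittest.mock import MagicMock
+#   pool = MockSDKObject(MockStoragePoolApi.get_pool_details_response('pool_object'))
+#   storagepool_module_mock.unity_conn.get_pool = MagicMock(return_value=pool)
+#   expected = MockStoragePoolApi.get_pool_details_response('module')
+#   assert expected['storage_pool_details']['id'] == 'pool_mock_1'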
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py
new file mode 100644
index 000000000..82097a338
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py
@@ -0,0 +1,174 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of volume module on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response import MockSDKObject
+
+
+class MockVolumeApi:
+
+ VOLUME_MODULE_ARGS = {
+ 'unispherehost': '**.***.**.***',
+ 'port': '123',
+ 'vol_name': None,
+ 'vol_id': None,
+ 'description': None,
+ 'pool_name': None,
+ 'pool_id': None,
+ 'size': None,
+ 'cap_unit': None,
+ 'is_thin': None,
+ 'compression': None,
+ 'advanced_dedup': None,
+ 'sp': None,
+ 'io_limit_policy': None,
+ 'snap_schedule': None,
+ 'host_name': None,
+ 'host_id': None,
+ 'hosts': {},
+ 'hlu': None,
+ 'mapping_state': None,
+ 'new_vol_name': None,
+ 'tiering_policy': None,
+ 'state': None,
+ }
+
+ pool = {
+ 'alert_threshold': 60,
+ 'creation_time': '2021-10-18 12:51:27+00:00',
+ 'description': 'A2Z',
+ 'existed': True,
+ 'harvest_state': 'UsageHarvestStateEnum.IDLE',
+ 'hash': 8778647453970,
+ 'health': {'UnityHealth': {'hash': 8778647453730}},
+ 'id': 'pool_3',
+ 'is_all_flash': True,
+ 'is_empty': False,
+ 'is_fast_cache_enabled': False,
+ 'is_harvest_enabled': True,
+ 'is_snap_harvest_enabled': True,
+ 'metadata_size_subscribed': 646124142592,
+ 'metadata_size_used': 357287591936,
+ 'name': 'Extreme_Perf_tier',
+ 'object_id': 12884901892,
+ 'pool_fast_vp': {'UnityPoolFastVp': {'hash': 8778647539688}},
+ 'pool_space_harvest_high_threshold': 95.0,
+ 'pool_space_harvest_low_threshold': 70.5,
+ 'pool_type': 'StoragePoolTypeEnum.TRADITIONAL',
+ 'raid_type': 'RaidTypeEnum.RAID5',
+ 'size_free': 1174673555456,
+ 'size_subscribed': 8703230820352,
+ 'size_total': 3141768577024,
+ 'size_used': 1802576257024,
+ 'snap_size_subscribed': 290195193856,
+ 'snap_size_used': 43098112,
+ 'snap_space_harvest_high_threshold': 20.5,
+ 'snap_space_harvest_low_threshold': 1.0,
+ 'tiers': {'UnityPoolTierList': [{'UnityPoolTier': {'hash': 8778647538737}},
+ {'UnityPoolTier': {'hash': 8778647538749}},
+ {'UnityPoolTier': {'hash': 8778647526797}}]},
+ }
+
+    @staticmethod
+    def _volume_details(io_limit_policy, is_advanced_dedup_enabled):
+        # Shared canned volume payload; only the two passed-in fields differ
+        # between the create and modify responses.
+        return {
+            'current_node': 'NodeEnum.SPB',
+            'data_reduction_percent': 0,
+            'data_reduction_ratio': 1.0,
+            'data_reduction_size_saved': 0,
+            'default_node': 'NodeEnum.SPB',
+            'description': None,
+            'effective_io_limit_max_iops': None,
+            'effective_io_limit_max_kbps': None,
+            'existed': True,
+            'family_base_lun': {'UnityLun': {}},
+            'family_clone_count': 0,
+            'hash': 8769317548849,
+            'health': {'UnityHealth': {}},
+            'host_access': [],
+            'id': 'sv_214551',
+            'io_limit_policy': io_limit_policy,
+            'is_advanced_dedup_enabled': is_advanced_dedup_enabled,
+            'is_compression_enabled': True,
+            'is_data_reduction_enabled': True,
+            'is_replication_destination': False,
+            'is_snap_schedule_paused': False,
+            'is_thin_clone': False,
+            'is_thin_enabled': True,
+            'metadata_size': 3758096384,
+            'metadata_size_allocated': 3221225472,
+            'name': 'Atest',
+            'per_tier_size_used': [3489660928, 0, 0],
+            'pool': {'id': 'pool_3', 'name': 'Extreme_Perf_tier'},
+            'size_allocated': 0,
+            'size_total': 2147483648,
+            'size_total_with_unit': '2.0 GB',
+            'size_used': None,
+            'snap_count': 0,
+            'snap_schedule': None,
+            'snap_wwn': '60:06:01:60:5C:F0:50:00:F6:42:70:38:7A:90:40:FF',
+            'snaps_size': 0,
+            'snaps_size_allocated': 0,
+            'storage_resource': {'UnityStorageResource': {}},
+            'tiering_policy': 'TieringPolicyEnum.AUTOTIER_HIGH',
+            'type': 'LUNTypeEnum.STANDALONE',
+            'wwn': '60:06:01:60:5C:F0:50:00:41:25:EA:63:94:92:92:AE',
+        }
+
+    @staticmethod
+    def create_volume_response(response_type):
+        if response_type == 'api':
+            return {'volume_details': MockVolumeApi._volume_details(True, True)}
+        else:
+            return 'Create volume operation Atest failed with error'
+
+    @staticmethod
+    def modify_volume_response(response_type):
+        if response_type == 'api':
+            return {'volume_details': MockVolumeApi._volume_details(None, False)}
+        else:
+            return 'Failed to modify the volume Atest with error'
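+
+# Minimal usage sketch (illustrative only; ``volume_module_mock`` is a
+# hypothetical fixture):
+#
+#   from unittest.mock import MagicMock
+#   volume_module_mock.unity_conn.get_pool = MagicMock(
+#       return_value=MockSDKObject(MockVolumeApi.pool))
+#   expected = MockVolumeApi.create_volume_response('api')
+#   assert expected['volume_details']['pool']['id'] == MockVolumeApi.pool['id']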
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py
new file mode 100644
index 000000000..e28c2e935
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py
@@ -0,0 +1,169 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of CIFS server module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_cifsserver_api \
+ import MockCIFSServerApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
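+# Stub the shared utils helpers *before* importing the module under test so
+# that its import-time wiring (logger, Unisphere connection, required-library
+# checks) never touches a real array.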
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+utils.UnityCifsServer = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.unity.plugins.modules.cifsserver import CIFSServer
+
+
+class TestCIFSServer():
+
+ get_module_args = MockCIFSServerApi.CIFS_SERVER_MODULE_ARGS
+
+ @pytest.fixture
+ def cifsserver_module_mock(self):
+ cifsserver_module_mock = CIFSServer()
+ cifsserver_module_mock.unity_conn = MagicMock()
+ utils.cifsserver = MagicMock()
+ return cifsserver_module_mock
+
+ def test_get_cifs_server_details(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.unity_conn.get_cifs_server = MagicMock(return_value=MockSDKObject(cifs_server_details))
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.get_cifs_server_details_method_response() == \
+ cifsserver_module_mock.module.exit_json.call_args[1]['cifs_server_details']
+
+ def test_get_cifs_server_details_using_id(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_id': 'cifs_59',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.unity_conn.get_cifs_server = MagicMock(return_value=MockSDKObject(cifs_server_details))
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.get_cifs_server_details_method_response() == \
+ cifsserver_module_mock.module.exit_json.call_args[1]['cifs_server_details']
+
+    def test_get_nas_server_id(self, cifsserver_module_mock):
+ nas_server_details = MockCIFSServerApi.get_nas_server_details()
+ self.get_module_args.update({
+ 'cifs_server_id': 'cifs_59',
+ 'nas_server_name': 'test_nas1',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.unity_conn.get_nas_server = MagicMock(return_value=MockSDKObject(nas_server_details))
+ cifsserver_module_mock.perform_module_operation()
+ cifsserver_module_mock.unity_conn.get_nas_server.assert_called()
+
+ def test_create_cifs_server(self, cifsserver_module_mock):
+ self.get_module_args.update({
+ 'nas_server_id': 'nas_18',
+ 'cifs_server_name': 'test_cifs_server',
+ 'domain': 'xxx.xxx.xxx.xxx',
+ 'domain_username': 'xxxxxxxx',
+ 'domain_password': 'xxxxxxxx',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=None)
+ cifsserver_module_mock.unity_conn.create_cifs_server = MagicMock(return_value=True)
+ cifsserver_module_mock.perform_module_operation()
+ assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_create_cifs_server_throws_exception(self, cifsserver_module_mock):
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'domain': 'xxx.xxx.xxx.xxx',
+ 'domain_username': 'xxxxxxxx',
+ 'domain_password': 'xxxxxxxx',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=None)
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.create_cifs_server_without_nas() == cifsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_delete_cifs_server(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'unjoin_cifs_server_account': False,
+ 'domain_username': 'xxxxxxxx',
+ 'domain_password': 'xxxxxxxx',
+ 'state': 'absent'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details)
+ cifsserver_module_mock.get_cifs_server_instance = MagicMock(return_value=MockSDKObject(cifs_server_details))
+ cifsserver_module_mock.perform_module_operation()
+ assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_is_modification_required(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'netbios_name': 'ansible_netbios',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details)
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_is_domain_modification_required(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'domain': 'yyy.yyy.yyy.yyy',
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details)
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_is_modify_interfaces(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'interfaces': ['if_39'],
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details)
+ cifsserver_module_mock.perform_module_operation()
+ assert MockCIFSServerApi.modify_error_msg() == cifsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_is_modify_interfaces_idempotency(self, cifsserver_module_mock):
+ cifs_server_details = MockCIFSServerApi.get_cifs_server_details_method_response()
+ self.get_module_args.update({
+ 'cifs_server_name': 'test_cifs_server',
+ 'interfaces': ['if_43'],
+ 'state': 'present'
+ })
+ cifsserver_module_mock.module.params = self.get_module_args
+ cifsserver_module_mock.get_details = MagicMock(return_value=cifs_server_details)
+ cifsserver_module_mock.perform_module_operation()
+ assert cifsserver_module_mock.module.exit_json.call_args[1]['changed'] is False
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py
new file mode 100644
index 000000000..dd2cdd81a
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_consistencygroup.py
@@ -0,0 +1,193 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for consistency group module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_consistencygroup_api \
+ import MockConsistenyGroupApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock(side_effect=[MagicMock(),
+ MockConsistenyGroupApi.get_remote_system_conn_response()])
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.consistencygroup import ConsistencyGroup
+
+
+class TestConsistencyGroup():
+
+ get_module_args = MockConsistenyGroupApi.CONSISTENCY_GROUP_MODULE_ARGS
+
+ @pytest.fixture
+ def consistencygroup_module_mock(self):
+ consistencygroup_module_mock = ConsistencyGroup()
+ consistencygroup_module_mock.unity_conn = MagicMock()
+ utils.cg = MagicMock()
+ return consistencygroup_module_mock
+
+ def test_enable_cg_replication(self, consistencygroup_module_mock):
+ self.get_module_args.update({
+ 'cg_name': 'lun_test_cg_source_12',
+ 'replication_params': {
+ 'destination_cg_name': 'destination_cg_1',
+ 'replication_mode': 'asynchronous',
+ 'rpo': 60,
+ 'replication_type': 'remote',
+ 'remote_system': {
+ 'remote_system_host': '11.111.11.11',
+ 'remote_system_verifycert': False,
+ 'remote_system_username': 'username',
+ 'remote_system_password': 'password',
+ 'remote_system_port': 1111
+ },
+ 'destination_pool_name': 'pool_test_name_1',
+ 'destination_pool_id': None
+ },
+ 'replication_state': 'enable',
+ 'state': 'present'
+ })
+ consistencygroup_module_mock.module.params = self.get_module_args
+ cg_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_object = MockConsistenyGroupApi.get_cg_object()
+ consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object)
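+        # get_details is consumed twice during perform_module_operation(), so
+        # the side_effect queues the pre-replication details first and the
+        # post-enable view second.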
+ consistencygroup_module_mock.get_details = MagicMock(side_effect=[
+ cg_details,
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')])
+ cg_object.get_id = MagicMock(return_value=cg_details['id'])
+ utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object)
+ cg_object.check_cg_is_replicated = MagicMock(return_value=False)
+ consistencygroup_module_mock.unity_conn.get_remote_system = \
+ MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system'))
+ utils.UnityStorageResource = MagicMock(return_value=MockSDKObject({}))
+ cg_object.replicate_cg_with_dst_resource_provisioning = MagicMock(return_value=None)
+ consistencygroup_module_mock.perform_module_operation()
+ assert consistencygroup_module_mock.module.exit_json.call_args[1]['consistency_group_details']['cg_replication_enabled'] is True
+ assert consistencygroup_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_enable_cg_replication_negative_1(self, consistencygroup_module_mock):
+ self.get_module_args.update({
+ 'cg_name': 'lun_test_cg_source_12',
+ 'replication_params': {
+ 'destination_cg_name': '',
+ 'replication_mode': 'asynchronous',
+ 'rpo': 60,
+ 'replication_type': 'local',
+ 'destination_pool_name': None,
+ 'destination_pool_id': 'pool_test_1'
+ },
+ 'replication_state': 'enable',
+ 'state': 'present'
+ })
+ consistencygroup_module_mock.module.params = self.get_module_args
+ cg_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_object = MockConsistenyGroupApi.get_cg_object()
+ consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object)
+ consistencygroup_module_mock.get_details = MagicMock(side_effect=[
+ cg_details,
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')])
+ cg_object.get_id = MagicMock(return_value=cg_details['id'])
+ utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object)
+ cg_object.check_cg_is_replicated = MagicMock(return_value=False)
+ consistencygroup_module_mock.unity_conn.get_remote_system = \
+ MagicMock(return_value=MockConsistenyGroupApi.get_cg_replication_dependent_response('remote_system'))
+ utils.UnityStorageResource = MagicMock(return_value=MockSDKObject({}))
+ cg_object.replicate_cg_with_dst_resource_provisioning = MagicMock(return_value=None)
+ consistencygroup_module_mock.perform_module_operation()
+ assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('destination_cg_name_validation')
+
+ def test_enable_cg_replication_negative_2(self, consistencygroup_module_mock):
+ self.get_module_args.update({
+ 'cg_name': 'lun_test_cg_source_12',
+ 'replication_params': {
+ 'destination_cg_name': 'destination_cg_1',
+ 'replication_mode': 'asynchronous',
+ 'rpo': 60,
+ 'replication_type': 'remote',
+ 'remote_system': {
+ 'remote_system_host': '11.111.11.11',
+ 'remote_system_verifycert': False,
+ 'remote_system_username': 'username',
+ 'remote_system_password': 'password',
+ 'remote_system_port': 1111
+ },
+ 'destination_pool_name': None,
+ 'destination_pool_id': 'pool_test_1'
+ },
+ 'replication_state': 'enable',
+ 'state': 'present'
+ })
+ consistencygroup_module_mock.module.params = self.get_module_args
+ cg_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_object = MockConsistenyGroupApi.get_cg_object()
+ consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object)
+ consistencygroup_module_mock.get_details = MagicMock(side_effect=[
+ cg_details,
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details')])
+ cg_object.get_id = MagicMock(return_value=cg_details['id'])
+ utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object)
+ cg_object.check_cg_is_replicated = MagicMock(return_value=False)
+ consistencygroup_module_mock.unity_conn.get_remote_system = MagicMock(side_effect=MockApiException)
+ consistencygroup_module_mock.perform_module_operation()
+ assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('enable_cg_exception')
+
+ def test_disable_cg_replication(self, consistencygroup_module_mock):
+ self.get_module_args.update({
+ 'cg_name': 'lun_test_cg_source_12',
+ 'replication_state': 'disable',
+ 'state': 'present'
+ })
+ consistencygroup_module_mock.module.params = self.get_module_args
+ cg_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_object = MockConsistenyGroupApi.get_cg_object()
+ consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object)
+ consistencygroup_module_mock.get_details = MagicMock(side_effect=[
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details'),
+ cg_details])
+ cg_object.get_id = MagicMock(return_value=cg_details['id'])
+ utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object)
+ cg_object.check_cg_is_replicated = MagicMock(return_value=True)
+ repl_session = MockConsistenyGroupApi.get_cg_replication_dependent_response('replication_session')
+ repl_session.delete = MagicMock(return_value=None)
+ consistencygroup_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=[repl_session])
+ consistencygroup_module_mock.perform_module_operation()
+ assert consistencygroup_module_mock.module.exit_json.call_args[1]['consistency_group_details']['cg_replication_enabled'] is False
+ assert consistencygroup_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_disable_cg_replication_throws_exception(self, consistencygroup_module_mock):
+ self.get_module_args.update({
+ 'cg_name': 'lun_test_cg_source_12',
+ 'replication_state': 'disable',
+ 'state': 'present'
+ })
+ consistencygroup_module_mock.module.params = self.get_module_args
+ cg_details = MockConsistenyGroupApi.cg_get_details_method_response()
+ cg_object = MockConsistenyGroupApi.get_cg_object()
+ consistencygroup_module_mock.unity_conn.get_cg = MagicMock(return_value=cg_object)
+ consistencygroup_module_mock.get_details = MagicMock(side_effect=[
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('cg_replication_enabled_details'),
+ cg_details])
+ cg_object.get_id = MagicMock(return_value=cg_details['id'])
+ utils.cg.UnityConsistencyGroup.get = MagicMock(return_value=cg_object)
+ cg_object.check_cg_is_replicated = MagicMock(side_effect=MockApiException)
+ consistencygroup_module_mock.perform_module_operation()
+ assert consistencygroup_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockConsistenyGroupApi.get_cg_replication_dependent_response('disable_cg_exception')
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py
new file mode 100644
index 000000000..c8551d08b
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_filesystem.py
@@ -0,0 +1,94 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for FileSystem module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import re
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_filesystem_api \
+ import MockFileSystemApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.UnityReplicationSession = object
+
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.filesystem import Filesystem
+
+
+class TestFileSystem():
+
+ FILE_SYSTEM_MODULE_ARGS = {'filesystem_id': '123', 'filesystem_name': None, 'nas_server_name': None,
+ 'nas_server_id': None, 'pool_name': None, 'pool_id': None, 'size': None,
+ 'cap_unit': None, 'quota_config': None, 'snap_schedule_name': None,
+ 'snap_schedule_id': None, 'replication_params': {}, 'replication_state': None, 'state': None}
+
+ @pytest.fixture
+ def filesystem_module_mock(self):
+ filesystem_module_mock = Filesystem()
+ filesystem_module_mock.unity_conn = MagicMock()
+ return filesystem_module_mock
+
+ def test_enable_fs_replication(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params())
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_response = MockFileSystemApi.get_file_system_response()
+ filesystem_response['replicate_with_dst_resource_provisioning'] = MagicMock(return_value=True)
+ filesystem_module_mock.perform_module_operation()
+ assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_enable_fs_replication_invalid_params(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params(False))
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_module_mock.is_modify_required = MagicMock(return_value=False)
+ filesystem_module_mock.perform_module_operation()
+ assert "Invalid rpo value" in \
+ filesystem_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_enable_fs_replication_throws_ex(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params())
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_module_mock.is_modify_required = MagicMock(return_value=False)
+ filesystem_response = MockFileSystemApi.get_file_system_response()
+ filesystem_response['replicate_with_dst_resource_provisioning'] = MagicMock(side_effect=Exception)
+ filesystem_module_mock.get_filesystem = MagicMock(side_effect=[
+ MockSDKObject(filesystem_response)])
+ filesystem_module_mock.get_filesystem_display_attributes = MagicMock(side_effect=[
+ MockSDKObject(filesystem_response)])
+ filesystem_module_mock.perform_module_operation()
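+        # The raised message embeds a MagicMock repr, so strip it with re.sub
+        # before asserting on the module's own error prefix.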
+ assert "Enabling replication to the filesystem failed with error" in \
+ re.sub(' <.*?>>', '', filesystem_module_mock.module.fail_json.call_args[1]['msg'])
+
+ def test_modify_fs_replication(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update(MockFileSystemApi.get_replication_params())
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_module_mock.perform_module_operation()
+ filesystem_module_mock.get_replication_session_on_filter = MagicMock()
+ assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_disable_replication(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'})
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_module_mock.get_filesystem_display_attributes = MagicMock()
+ filesystem_module_mock.perform_module_operation()
+ assert filesystem_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_disable_replication_throws_ex(self, filesystem_module_mock):
+ self.FILE_SYSTEM_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'})
+ filesystem_module_mock.module.params = self.FILE_SYSTEM_MODULE_ARGS
+ filesystem_module_mock.get_replication_session = MagicMock(side_effect=Exception)
+ filesystem_module_mock.get_filesystem_display_attributes = MagicMock()
+ filesystem_module_mock.perform_module_operation()
+ assert "Disabling replication on the filesystem failed with error" in \
+ re.sub(' <.*?>', '', filesystem_module_mock.module.fail_json.call_args[1]['msg'])
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py
new file mode 100644
index 000000000..de94c38d3
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py
@@ -0,0 +1,143 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for host module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_host_api \
+ import MockHostApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+    import HttpError as http_error
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.host import Host
+
+
+class TestHost():
+
+ get_module_args = MockHostApi.HOST_MODULE_ARGS
+
+ @pytest.fixture
+ def host_module_mock(self):
+ host_module_mock = Host()
+ host_module_mock.unity = MagicMock()
+ utils.host = MagicMock()
+ return host_module_mock
+
+ def test_get_host_details(self, host_module_mock):
+ host_details = MockHostApi.get_host_details_response('api')
+ self.get_module_args.update({
+ 'host_name': 'host_name_1',
+ })
+ host_module_mock.module.params = self.get_module_args
+ host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list())
+ utils.host.UnityHostList.get = MagicMock(return_value=MockHostApi.get_host_count_response())
+ host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]])
+ host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(host_details))
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_response('module')['host_details'] == host_module_mock.module.exit_json.call_args[1]['host_details']
+
+ def test_get_host_details_throws_exception(self, host_module_mock):
+ self.get_module_args.update({
+ 'host_name': 'name1'
+ })
+ host_module_mock.module.params = self.get_module_args
+ utils.HttpError = http_error
+ utils.host.UnityHostList.get = MagicMock(side_effect=http_error)
+ host_module_mock.create_host = MagicMock(return_value=(False, MagicMock()))
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_response('error') == host_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_add_network_address_to_host(self, host_module_mock):
+ self.get_module_args.update({
+ 'host_name': 'host_name_1',
+ 'network_address': 'net_add_1',
+ 'network_address_state': 'present-in-host',
+ 'state': 'present'
+ })
+ host_module_mock.module.params = self.get_module_args
+ host_details = MockHostApi.get_host_details_response('api')
+ host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]])
+ host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list())
+ host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(MockHostApi.get_host_details_after_network_address_addition('api')))
+ host_details = MockSDKObject(host_details)
+ host_details.add_ip_port = MagicMock(return_value=None)
+ host_details.add_to_skip_list('add_ip_port')
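+        # Skip-listing the stubbed helper keeps it out of the MockSDKObject
+        # property dict that the assertions below compare against.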
+ host_module_mock.get_host_details = MagicMock(return_value=host_details)
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_after_network_address_addition('module')['host_details'] == \
+ host_module_mock.module.exit_json.call_args[1]['host_details']
+ assert MockHostApi.get_host_details_after_network_address_addition('module')['changed'] == host_module_mock.module.exit_json.call_args[1]['changed']
+
+ def test_add_network_address_to_host_negative(self, host_module_mock):
+ self.get_module_args.update({
+ 'host_name': 'host_name_1',
+ 'network_address': 'net_ad$$$$$d_12',
+ 'network_address_state': 'present-in-host',
+ 'state': 'present'
+ })
+ host_module_mock.module.params = self.get_module_args
+ host_details = MockHostApi.get_host_details_response('api')
+ host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]])
+ host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list())
+ host_module_mock.manage_network_address = MagicMock(return_value=(None, False))
+ host_module_mock.get_host_details = MagicMock(return_value=MockSDKObject(host_details))
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_after_network_address_addition('invalid_address') == \
+ host_module_mock.module.fail_json.call_args[1]['msg']
+ assert host_module_mock.module.exit_json.call_args[1]['changed'] is False
+
+ def test_remove_network_address_from_host(self, host_module_mock):
+ self.get_module_args.update({
+ 'host_name': 'host_name_1',
+ 'network_address': 'host_name_1',
+ 'network_address_state': 'absent-in-host',
+ 'state': 'present'
+ })
+ host_module_mock.module.params = self.get_module_args
+ host_details = MockHostApi.get_host_details_response('api')
+ host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]])
+ host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list())
+ host_module_mock.unity.get_host = MagicMock(return_value=MockSDKObject(MockHostApi.get_host_details_after_network_address_removal('api')))
+ host_details = MockSDKObject(host_details)
+ host_details.delete_ip_port = MagicMock(return_value=None)
+ host_details.add_to_skip_list('delete_ip_port')
+ host_module_mock.get_host_details = MagicMock(return_value=host_details)
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_after_network_address_removal('module')['host_details'] == \
+ host_module_mock.module.exit_json.call_args[1]['host_details']
+ assert MockHostApi.get_host_details_after_network_address_removal('module')['changed'] == host_module_mock.module.exit_json.call_args[1]['changed']
+
+ def test_remove_network_address_from_host_negative(self, host_module_mock):
+ self.get_module_args.update({
+ 'host_name': 'host_name_1',
+ 'network_address': '1.1.1',
+ 'network_address_state': 'absent-in-host',
+ 'state': 'present'
+ })
+ host_module_mock.module.params = self.get_module_args
+ host_details = MockHostApi.get_host_details_response('api')
+ host_module_mock.unity.get_initiator = MagicMock(side_effect=[host_details['fc_host_initiators'][0], host_details['iscsi_host_initiators'][0]])
+ host_module_mock.get_host_initiators_list = MagicMock(return_value=MockHostApi.get_host_initiators_list())
+ host_module_mock.manage_network_address = MagicMock(return_value=(None, False))
+ host_module_mock.get_host_details = MagicMock(return_value=MockSDKObject(host_details))
+ host_module_mock.perform_module_operation()
+ assert MockHostApi.get_host_details_after_network_address_removal('invalid_IPV4') == \
+ host_module_mock.module.fail_json.call_args[1]['msg']
+ assert host_module_mock.module.exit_json.call_args[1]['changed'] is False
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py
new file mode 100644
index 000000000..88151bcba
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_interface.py
@@ -0,0 +1,350 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for interface module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_interface_api \
+ import MockInterfaceApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.interface import Interface
+
+
+class TestInterface():
+
+ interface_module_args = MockInterfaceApi.INTERFACE_MODULE_ARGS
+
+ @pytest.fixture
+ def interface_module_mock(self):
+ interface_module_mock = Interface()
+ interface_module_mock.module.check_mode = False
+ interface_module_mock.unity_conn = MagicMock()
+ return interface_module_mock
+
+ def test_validate_param_ethernet_port_name_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'ethernet_port_name': " ",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
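+        # Neutralize the downstream lookup/create helpers so that only the
+        # ethernet_port_name validation branch is exercised.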
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.get_nas_server_obj = mock_none_response
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('invalid_ethernet_port_name') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_validate_param_vlan_id_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'vlan_id': 2,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.get_nas_server_obj = mock_none_response
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('invalid_vlan_id') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_validate_param_interface_ip_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': "10.2.2",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.get_nas_server_obj = mock_none_response
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('invalid_interface_ip') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_validate_param_gateway_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'gateway': "10.2.1",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.get_nas_server_obj = mock_none_response
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('invalid_gateway') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_validate_param_netmask_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'netmask': "10.2.0/2",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.get_nas_server_obj = mock_none_response
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('invalid_netmask') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_get_nas_server_obj_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_id': "nas_id_00",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=MockInterfaceApi.get_nas_server_obj_existed_false())
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_nas_server_obj_errors('existed_false') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_get_nas_server_obj_exception(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_id': "nas_id_00",
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ mock_none_response = MagicMock(return_value=None)
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=MockApiException)
+ interface_module_mock.validate_create_params = mock_none_response
+ interface_module_mock.add_interface = mock_none_response
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_nas_server_obj_errors('exception') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_modify_operation_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'vlan_id': 4,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object)
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT)
+ interface_module_mock.perform_module_operation()
+ assert MockInterfaceApi.get_interface_error_response('modify_failure') == \
+ interface_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_get_interface_details(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object)
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT)
+ interface_module_mock.perform_module_operation()
+ interface_details = MockInterfaceApi.INTERFACE_OBJECT._get_properties()
+ assert interface_module_mock.module.exit_json.call_args[1]['interface_details'] == interface_details
+
+ def test_get_interface_details_exception(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object)
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(side_effect=[MockApiException, MockInterfaceApi.INTERFACE_OBJECT])
+ interface_module_mock.validate_create_params = MagicMock(return_value=None)
+ interface_module_mock.add_interface = MagicMock(return_value=None)
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockInterfaceApi.get_interface_exception_response('interface_exception')
+
+ def test_add_interface_without_role_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME,
+ 'netmask': MockInterfaceApi.NETMASK_DUMMY,
+ 'gateway': MockInterfaceApi.GATEWAY_DUMMY,
+ 'vlan_id': 324,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_existing = MockInterfaceApi.get_nas_without_interface()
+        interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_existing)
+ interface_module_mock.add_interface = MagicMock(return_value=None)
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockInterfaceApi.get_interface_error_response('no_role')
+
+ def test_add_interface_without_ethernet_negative(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'role': "PRODUCTION",
+ 'netmask': MockInterfaceApi.NETMASK_DUMMY,
+ 'gateway': MockInterfaceApi.GATEWAY_DUMMY,
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'vlan_id': 324,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_existing = MockInterfaceApi.get_nas_without_interface()
+        interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_existing)
+ interface_module_mock.add_interface = MagicMock(return_value=None)
+ interface_module_mock.get_interface_details = MagicMock(side_effect=[None, MockSDKObject({})])
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockInterfaceApi.get_interface_error_response('no_ethernet')
+
+ def test_add_interface(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME,
+ 'role': "PRODUCTION",
+ 'netmask': MockInterfaceApi.NETMASK_DUMMY,
+ 'gateway': MockInterfaceApi.GATEWAY_DUMMY,
+ 'vlan_id': 324,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ nas_server_existing = MockInterfaceApi.get_nas_without_interface()
+ nas_server_existing.get_id = MagicMock(return_value='nas_id_00')
+ nas_server_existing.add_to_skip_list('get_id')
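+        # First lookup returns the NAS server without the interface (create path); the second returns it with the interface attached.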
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_existing,
+ nas_server_object])
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT)
+ utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY
+ ethernet_port_info = MagicMock()
+ ethernet_port_info.id = 'ethernet_port_id_0'
+ interface_module_mock.unity_conn.get_ethernet_port = MagicMock(return_value=ethernet_port_info)
+ utils.UnityFileInterface = MagicMock()
+ utils.UnityFileInterface.create = MagicMock(return_value=None)
+ interface_module_mock.perform_module_operation()
+ interface_details = MockInterfaceApi.INTERFACE_OBJECT._get_properties()
+ assert interface_module_mock.module.exit_json.call_args[1]['interface_details'] == interface_details
+
+ def test_add_interface_no_change(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME,
+ 'role': "PRODUCTION",
+ 'netmask': MockInterfaceApi.NETMASK_DUMMY,
+ 'gateway': MockInterfaceApi.GATEWAY_DUMMY,
+ 'vlan_id': 324,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_object,
+ nas_server_object])
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT)
+ utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY
+ ethernet_port_info = MagicMock()
+ ethernet_port_info.id = 'ethernet_port_id_0'
+ interface_module_mock.unity_conn.get_ethernet_port = MagicMock(return_value=ethernet_port_info)
+ utils.UnityFileInterface = MagicMock()
+ utils.UnityFileInterface.create = MagicMock(return_value=None)
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.exit_json.call_args[1]['changed'] is False
+
+ def test_add_interface_exception(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'ethernet_port_name': MockInterfaceApi.ETHERNET_PORT_NAME,
+ 'role': "PRODUCTION",
+ 'netmask': MockInterfaceApi.NETMASK_DUMMY,
+ 'gateway': MockInterfaceApi.GATEWAY_DUMMY,
+ 'vlan_id': 324,
+ 'state': "present"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ nas_server_existing = MockInterfaceApi.get_nas_without_interface()
+ nas_server_existing.get_id = MagicMock(return_value='nas_id_00')
+ nas_server_existing.add_to_skip_list('get_id')
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=[nas_server_existing,
+ nas_server_object])
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=MockInterfaceApi.INTERFACE_OBJECT)
+ utils.FileInterfaceRoleEnum = MockInterfaceApi.FILE_INTERFACE_ROLE_ENUM_DUMMY
+ ethernet_port_info = MagicMock()
+ ethernet_port_info.id = 'ethernet_port_id_0'
+ interface_module_mock.unity_conn.get_ethernet_port = MagicMock(return_value=ethernet_port_info)
+ utils.UnityFileInterface = MagicMock()
+ utils.UnityFileInterface.create = MagicMock(side_effect=MockApiException)
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockInterfaceApi.get_interface_exception_response('add_interface_exception')
+
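+    # Delete path: delete() is stubbed on the interface object and registered on the MockSDKObject skip list.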
+ def test_delete_interface(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'state': "absent"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object)
+ interface_object = MockInterfaceApi.INTERFACE_OBJECT
+ interface_object.delete = MagicMock(return_value=None)
+ interface_object.add_to_skip_list('delete')
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=interface_object)
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_delete_interface_exception(self, interface_module_mock):
+ self.interface_module_args.update({
+ 'nas_server_name': "dummy_nas",
+ 'interface_ip': MockInterfaceApi.INTERFACE_DUMMY,
+ 'state': "absent"
+ })
+ interface_module_mock.module.params = self.interface_module_args
+ nas_server_object = MockInterfaceApi.NAS_SERVER_OBJECT
+ interface_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_object)
+ interface_object = MockInterfaceApi.INTERFACE_OBJECT
+ interface_object.delete = MagicMock(side_effect=MockApiException)
+ interface_object.add_to_skip_list('delete')
+ interface_module_mock.unity_conn.get_file_interface = MagicMock(return_value=interface_object)
+ interface_module_mock.perform_module_operation()
+ assert interface_module_mock.module.fail_json.call_args[1]['msg'] == \
+ MockInterfaceApi.get_interface_exception_response('delete_interface_exception')
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py
new file mode 100644
index 000000000..a929ba497
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nasserver.py
@@ -0,0 +1,112 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for NAS Server module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nasserver_api \
+ import MockNASServerApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.nas_server = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.nasserver import NASServer
+
+
+class TestNASServer():
+
+ NAS_SERVER_MODULE_ARGS = {'nas_server_name': 'nas0', 'nas_server_id': None, 'nas_server_new_name': None, 'default_unix_user': None,
+ 'default_windows_user': None, 'is_replication_destination': None, 'is_multiprotocol_enabled': None,
+ 'allow_unmapped_user': None, 'enable_windows_to_unix_username_mapping': None,
+ 'is_backup_only': None, 'is_packet_reflect_enabled': None, 'current_unix_directory_service': None,
+ 'replication_reuse_resource': None, 'replication_params': {}, 'replication_state': None, 'state': None}
+
+ @pytest.fixture
+ def nasserver_module_mock(self):
+ nasserver_module_mock = NASServer()
+ nasserver_module_mock.unity_conn = MagicMock()
+ return nasserver_module_mock
+
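+    # Helper: canned NAS server response with replicate_with_dst_resource_provisioning stubbed to succeed.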
+ def get_nas_response(self):
+ nasserver_response = MockNASServerApi.get_nas_server_response()
+ nasserver_response['replicate_with_dst_resource_provisioning'] = MagicMock(return_value=True)
+ return nasserver_response
+
+ def test_enable_nas_replication(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params())
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.perform_module_operation()
+ assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_enable_nas_replication_invalid_params(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params(False))
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.perform_module_operation()
+ assert "rpo value should be in range of 5 to 1440" in \
+ nasserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_enable_nas_replication_throws_ex(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params())
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.get_remote_system = MagicMock(side_effect=Exception)
+ nasserver_module_mock.perform_module_operation()
+ assert "Enabling replication to the nas server %s failed with error" \
+ % self.NAS_SERVER_MODULE_ARGS['nas_server_name'] in \
+ nasserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_modify_nas_replication(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update(MockNASServerApi.get_replication_params())
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.get_replication_session_on_filter = MagicMock()
+ nasserver_module_mock.perform_module_operation()
+ assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_disable_replication(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'})
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.update_replication_params = MagicMock()
+ nasserver_module_mock.perform_module_operation()
+ assert nasserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_disable_replication_throws_ex(self, nasserver_module_mock):
+ self.NAS_SERVER_MODULE_ARGS.update({'replication_state': 'disable', 'state': 'present'})
+ nasserver_module_mock.module.params = self.NAS_SERVER_MODULE_ARGS
+ nasserver_module_mock.get_nas_server = \
+ MagicMock(return_value=MockSDKObject(self.get_nas_response()))
+ nasserver_module_mock.to_update = MagicMock(return_value=False)
+ nasserver_module_mock.get_replication_session = MagicMock(side_effect=Exception)
+ nasserver_module_mock.perform_module_operation()
+ assert "Disabling replication on the nas server %s failed with error" \
+ % self.NAS_SERVER_MODULE_ARGS['nas_server_name'] in \
+ nasserver_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py
new file mode 100644
index 000000000..ed138147c
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfs.py
@@ -0,0 +1,183 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for nfs module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nfs_api \
+ import MockNfsApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules import nfs
+
+
+class TestNfs():
+
+ get_module_args = MockNfsApi.NFS_MODULE_ARGS
+
+ @pytest.fixture
+ def nfs_module_mock(self):
+ nfs_module_mock = nfs.NFS()
+ nfs_module_mock.unity = MagicMock()
+ return nfs_module_mock
+
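+    # With adv_host_mgmt_enabled=True the module resolves host entries through
+    # unity.get_host, so those tests queue get_host side_effects; with it
+    # disabled, domain/subnet strings are passed through and get_host is unmocked.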
+ def test_add_host_in_nfs_share_on_advhostmgmt_true(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': True,
+ 'no_access_hosts': [{'host_name': "host1"}, {'ip_address': "**.***.2.2"}],
+ 'host_state': 'present-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ utils.UnityNfsShareList = MagicMock
+ nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('add', True)
+ nfs_object.modify = MagicMock(return_value=None)
+ nfs_object.add_to_skip_list('modify')
+ fs_object = MockNfsApi.FILESYSTEM_OBJECT
+ get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('add', True)
+ nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object)
+ nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object)
+ nfs_module_mock.unity.get_host = MagicMock(side_effect=[MockNfsApi.get_host_obj(id=1), MockNfsApi.get_host_obj(id=2)])
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_remove_host_in_nfs_share_on_advhostmgmt_true(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': True,
+ 'no_access_hosts': [{'host_name': "host1"}, {'ip_address': "**.***.2.2"}],
+ 'host_state': 'absent-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ utils.UnityNfsShareList = MagicMock
+ nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('remove', True)
+ nfs_object.modify = MagicMock(return_value=None)
+ nfs_object.add_to_skip_list('modify')
+ fs_object = MockNfsApi.FILESYSTEM_OBJECT
+ get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('remove', True)
+ nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object)
+ nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object)
+ nfs_module_mock.unity.get_host = MagicMock(side_effect=[MockNfsApi.get_host_obj(id=1), MockNfsApi.get_host_obj(id=2),
+ MockNfsApi.get_host_obj(id=1), MockNfsApi.get_host_obj(id=2)])
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_add_host_in_nfs_share_on_advhostmgmt_false(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': False,
+ 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}],
+ 'host_state': 'present-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ utils.UnityNfsShareList = MagicMock
+ nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('add', False)
+ nfs_object.modify = MagicMock(return_value=None)
+ nfs_object.add_to_skip_list('modify')
+ fs_object = MockNfsApi.FILESYSTEM_OBJECT
+ get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('add', False)
+ nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object)
+ nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object)
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_remove_host_in_nfs_share_on_advhostmgmt_false(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': False,
+ 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}],
+ 'host_state': 'absent-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ utils.UnityNfsShareList = MagicMock
+ nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('remove', False)
+ nfs_object.modify = MagicMock(return_value=None)
+ nfs_object.add_to_skip_list('modify')
+ fs_object = MockNfsApi.FILESYSTEM_OBJECT
+ get_nfs_share_display_attrs_data = MockNfsApi.get_nfs_share_display_attr_on_host_access('remove', False)
+ nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object)
+ nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object)
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=get_nfs_share_display_attrs_data)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_host_access_nfs_share_subnet_negative(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': False,
+ 'read_only_root_hosts': [{'subnet': "1x.x.x.x"}],
+ 'host_state': 'present-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ nfs_module_mock.get_filesystem = MagicMock(return_value=None)
+ nfs_module_mock.get_nfs_share = MagicMock(return_value=None)
+ nfs_module_mock.create_nfs_share = MagicMock(return_value=None)
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=None)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('subnet_validation')
+
+    def test_host_access_nfs_share_advhostmgmt_negative(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'read_only_root_hosts': [{'subnet': "1x.x.x.x/10"}],
+ 'host_state': 'present-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ nfs_module_mock.get_filesystem = MagicMock(return_value=None)
+ nfs_module_mock.get_nfs_share = MagicMock(return_value=None)
+ nfs_module_mock.create_nfs_share = MagicMock(return_value=None)
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=None)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('advhostmngmnt_field_validation')
+
+ def test_host_access_nfs_share_exception_negative(self, nfs_module_mock):
+ self.get_module_args.update({
+ 'nfs_export_name': "nfsshare_dummy_name",
+ 'filesystem_id': "fs_id_1",
+ 'adv_host_mgmt_enabled': False,
+ 'read_only_root_hosts': [{'domain': MockNfsApi.DUMMY_DOMAIN_VALUE}, {'subnet': MockNfsApi.DUMMY_SUBNET_VALUE}],
+ 'host_state': 'absent-in-export',
+ 'state': 'present'
+ })
+ nfs_module_mock.module.params = self.get_module_args
+ utils.UnityNfsShareList = MagicMock
+ nfs_object = MockNfsApi.get_nfs_share_object_on_host_access('remove', False)
+ nfs_object.modify = MagicMock(side_effect=MockApiException)
+ nfs_object.add_to_skip_list('modify')
+ fs_object = MockNfsApi.FILESYSTEM_OBJECT
+ nfs_module_mock.unity.get_filesystem = MagicMock(return_value=fs_object)
+ nfs_module_mock.unity.get_nfs_share = MagicMock(return_value=nfs_object)
+ nfs.get_nfs_share_display_attrs = MagicMock(return_value=None)
+ nfs_module_mock.perform_module_operation()
+ assert nfs_module_mock.module.fail_json.call_args[1]['msg'] == MockNfsApi.host_access_negative_response('modify_exception')
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py
new file mode 100644
index 000000000..c2a680487
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py
@@ -0,0 +1,225 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of NFS server module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nfsserver_api \
+ import MockNFSServerApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import HttpError as http_error, MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+utils.UnityNfsServer = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.unity.plugins.modules.nfsserver import NFSServer
+
+
+class TestNFSServer():
+
+ get_module_args = MockNFSServerApi.NFS_SERVER_MODULE_ARGS
+
+ @pytest.fixture
+ def nfsserver_module_mock(self):
+ nfsserver_module_mock = NFSServer()
+ nfsserver_module_mock.unity_conn = MagicMock()
+ utils.nfsserver = MagicMock()
+ nfsserver_module_mock.module.check_mode = False
+ return nfsserver_module_mock
+
+ def test_get_nfs_server_details(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'nfs_server_id': 'nfs_95',
+ 'state': 'present'
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+        nas_server_obj = MockNFSServerApi.get_nas_server_id()
+        nas_server_obj.get_id = MagicMock(return_value="nas_10")
+        nas_server_obj.add_to_skip_list('get_id')
+        nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_obj)
+ nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(return_value=MockNFSServerApi.get_nfs_server_details()[0])
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.get_nfs_server_details_method_response() == \
+ nfsserver_module_mock.module.exit_json.call_args[1]['nfs_server_details']
+
+ def test_get_nfs_server_details_with_exception(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'nas_server_name': 'test_nas_server',
+ 'state': 'present'
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+        nas_server_obj = MockNFSServerApi.get_nas_server_id()
+        nas_server_obj.get_id = MagicMock(return_value="nas_10")
+        nas_server_obj.add_to_skip_list('get_id')
+        nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(return_value=nas_server_obj)
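+        # Point utils.HttpError at the test double so the module's except clause matches the raised error.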
+ utils.HttpError = http_error
+ nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(side_effect=http_error)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.get_nfs_server_api_exception() == \
+ nfsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_nfs_server(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'nas_server_name': 'dummy_name',
+ 'host_name': "dummy_nas23",
+ 'is_secure_enabled': True,
+ 'kerberos_domain_controller_type': "WINDOWS",
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'is_extended_credentials_enabled': False,
+ 'nfs_v4_enabled': True,
+ 'state': "present"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None)
+ utils.KdcTypeEnum = MagicMock(return_value={"KdcTypeEnum": {"description": "Windows", "name": "WINDOWS", "value": 2}})
+ utils.UnityNfsServer = MagicMock()
+ utils.UnityNfsServer.create = MagicMock(return_value=True)
+ nfsserver_module_mock.perform_module_operation()
+ assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_create_nfs_server_with_unix(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'nas_server_name': 'dummy_name',
+ 'host_name': "dummy_nas23",
+ 'is_secure_enabled': True,
+ 'kerberos_domain_controller_type': "UNIX",
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'is_extended_credentials_enabled': False,
+ 'nfs_v4_enabled': True,
+ 'state': "present"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None)
+        utils.KdcTypeEnum = MagicMock(return_value={"KdcTypeEnum": {"description": "Unix", "name": "UNIX", "value": 1}})
+ utils.UnityNfsServer = MagicMock()
+ utils.UnityNfsServer.create = MagicMock(return_value=True)
+ nfsserver_module_mock.perform_module_operation()
+ assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_create_nfs_server_throws_exception(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'nas_server_name': 'dummy_name',
+ 'host_name': "dummy_nas23",
+ 'is_secure_enabled': True,
+ 'kerberos_domain_controller_type': "WINDOWS",
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'is_extended_credentials_enabled': False,
+ 'nfs_v4_enabled': True,
+ 'state': "present"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None)
+ utils.UnityNfsServer = MagicMock()
+ utils.UnityNfsServer.create = MagicMock(side_effect=MockApiException)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.create_nfs_server_with_api_exception() in nfsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_delete_nfs_server(self, nfsserver_module_mock):
+ nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response()
+ self.get_module_args.update({
+ 'nas_server_name': 'test_nas_server',
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'remove_spn_from_kerberos': True,
+ 'state': "absent"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details)
+ nfsserver_module_mock.perform_module_operation()
+ assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_delete_nfs_server_with_spn_false(self, nfsserver_module_mock):
+ nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response()
+ self.get_module_args.update({
+ 'nas_server_name': 'test_nas_server',
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'remove_spn_from_kerberos': False,
+ 'state': "absent"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details)
+ nfsserver_module_mock.perform_module_operation()
+ assert nfsserver_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_delete_nfs_server_with_exception(self, nfsserver_module_mock):
+ nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response()
+ self.get_module_args.update({
+ 'nas_server_name': 'test_nas_server',
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'remove_spn_from_kerberos': False,
+ 'state': "absent"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details)
+ nfsserver_module_mock.unity_conn.get_nfs_server = MagicMock(side_effect=MockApiException)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.delete_exception() in nfsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_is_modification_required(self, nfsserver_module_mock):
+ nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response()
+ self.get_module_args.update({
+ 'nas_server_name': 'test_nas_server',
+ 'is_extended_credentials_enabled': True,
+ 'state': 'present'
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.modify_error_msg() == nfsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_get_nas_server_id_exception(self, nfsserver_module_mock):
+ nfs_server_details = MockNFSServerApi.get_nfs_server_details_method_response()
+ self.get_module_args.update({
+ 'nas_server_name': 'dummy_name',
+ 'is_secure_enabled': True,
+ 'host_name': "dummy_nas23",
+ 'kerberos_domain_controller_type': "WINDOWS",
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'is_extended_credentials_enabled': False,
+ 'nfs_v4_enabled': True,
+ 'state': "present"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.unity_conn.get_nas_server = MagicMock(side_effect=MockApiException)
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=nfs_server_details)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.get_nas_server_id_api_exception() in \
+ nfsserver_module_mock.module.fail_json.call_args[1]['msg']
+
+    def test_create_nfs_server_without_nas_server_id(self, nfsserver_module_mock):
+ self.get_module_args.update({
+ 'is_secure_enabled': True,
+ 'host_name': "dummy_nas23",
+ 'kerberos_domain_controller_type': "WINDOWS",
+ 'kerberos_domain_controller_username': "xxxxxxxx",
+ 'kerberos_domain_controller_password': "xxxxxxxx",
+ 'is_extended_credentials_enabled': False,
+ 'nfs_v4_enabled': True,
+ 'state': "present"
+ })
+ nfsserver_module_mock.module.params = self.get_module_args
+ nfsserver_module_mock.get_nas_server_id = MagicMock(return_value=None)
+ nfsserver_module_mock.get_nfs_server_details = MagicMock(return_value=None)
+ nfsserver_module_mock.create_nfs_server = MagicMock(return_value=None)
+ nfsserver_module_mock.perform_module_operation()
+ assert MockNFSServerApi.create_nfs_server_without_nas_server_id() in \
+ nfsserver_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py
new file mode 100644
index 000000000..94bf18c35
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_storagepool.py
@@ -0,0 +1,132 @@
+# Copyright: (c) 2022, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for host module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_storagepool_api \
+ import MockStoragePoolApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.storagepool import StoragePool
+
+
+class TestUnityStoragePool():
+
+ get_module_args = MockStoragePoolApi.STORAGE_POOL_MODULE_ARGS
+
+ @pytest.fixture
+ def storagepool_module_mock(self):
+ storagepool_module_mock = StoragePool()
+ storagepool_module_mock.conn = MagicMock()
+ return storagepool_module_mock
+
+    def test_get_pool_details(self, storagepool_module_mock):
+ self.get_module_args.update({
+ 'pool_name': 'Ansible_Unity_TEST_1',
+ })
+ storagepool_module_mock.module.params = self.get_module_args
+ get_pool = MockSDKObject(MockStoragePoolApi.get_pool_details_response('get_pool'))
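+        # Stub the raw-property lookup so it yields an object exposing is_schedule_enabled.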
+ get_pool._get_property_from_raw = MagicMock(return_value=MockSDKObject({'is_schedule_enabled': True}))
+ get_pool.add_to_skip_list('_get_property_from_raw')
+ storagepool_module_mock.conn.get_pool = MagicMock(return_value=get_pool)
+ pool_object = MockStoragePoolApi.get_pool_details_response('pool_object')
+ utils.UnityPool = MagicMock()
+ utils.UnityPool.get = MagicMock(return_value=MockSDKObject(pool_object))
+ disk_list = MockStoragePoolApi.get_pool_details_response('disk_list')
+ utils.UnityDiskList = MagicMock()
+ utils.UnityDiskList.get = MagicMock(return_value=disk_list)
+ storagepool_module_mock.perform_module_operation()
+ assert MockStoragePoolApi.get_pool_details_response('module')['storage_pool_details'] == \
+ storagepool_module_mock.module.exit_json.call_args[1]['storage_pool_details']
+
+    def test_get_pool_details_throws_exception(self, storagepool_module_mock):
+ self.get_module_args.update({
+ 'pool_name': 'Ansible_Unity_SP_3',
+ })
+ storagepool_module_mock.module.params = self.get_module_args
+ storagepool_module_mock.conn.get_pool = MagicMock(side_effect=MockApiException)
+ storagepool_module_mock.result = MagicMock()
+ storagepool_module_mock.get_pool_drives = MagicMock()
+        storagepool_module_mock.is_pool_modification_required = MagicMock(return_value=False)
+        storagepool_module_mock.perform_module_operation()
+ assert MockStoragePoolApi.get_pool_details_response('error') == storagepool_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_pool(self, storagepool_module_mock):
+ self.get_module_args.update({
+ 'pool_name': 'test_pool',
+ 'pool_description': 'Unity test pool.',
+ 'raid_groups': {
+ 'disk_group_id': "dg_16",
+ 'disk_num': 3,
+ 'raid_type': 'RAID10',
+ 'stripe_width': 'BEST_FIT',
+ },
+ 'alert_threshold': 50,
+ 'is_harvest_enabled': True,
+ 'pool_harvest_high_threshold': 59,
+ 'pool_harvest_low_threshold': 40,
+ 'is_snap_harvest_enabled': True,
+ 'snap_harvest_high_threshold': 80,
+ 'snap_harvest_low_threshold': 60,
+ 'fast_vp': "enabled",
+ 'fast_cache': "disabled",
+ 'pool_type': 'TRADITIONAL',
+ 'state': 'present'
+ })
+ storagepool_module_mock.module.params = self.get_module_args
+ storagepool_module_mock.get_raid_groups_response = MagicMock(return_value=None)
+ storagepool_module_mock.get_details = MagicMock(return_value=None)
+ pool_object = MockStoragePoolApi.create_pool_response('api')
+ utils.UnityPool = MagicMock()
+ utils.UnityPool.create = MagicMock(return_value=MockSDKObject(pool_object))
+ storagepool_module_mock.perform_module_operation()
+ assert storagepool_module_mock.module.exit_json.call_args[1]['changed']
+
+ def test_create_pool_throws_exception(self, storagepool_module_mock):
+ self.get_module_args.update({
+ 'pool_name': 'test_pool',
+ 'pool_description': 'Unity test pool.',
+ 'raid_groups': {
+ 'disk_group_id': "dg_16",
+ 'disk_num': 3,
+ 'raid_type': 'RAID10',
+ 'stripe_width': 'BEST_FIT',
+ },
+ 'alert_threshold': 50,
+ 'is_harvest_enabled': True,
+ 'pool_harvest_high_threshold': 59,
+ 'pool_harvest_low_threshold': 40,
+ 'is_snap_harvest_enabled': True,
+ 'snap_harvest_high_threshold': 80,
+ 'snap_harvest_low_threshold': 60,
+ 'fast_vp': "enabled",
+ 'fast_cache': "disabled",
+ 'pool_type': 'TRADITIONAL',
+ 'state': 'present'
+ })
+ storagepool_module_mock.module.params = self.get_module_args
+ storagepool_module_mock.get_details = MagicMock(return_value=None)
+ utils.UnityPool = MagicMock()
+ storagepool_module_mock.get_raid_groups_response = MagicMock(side_effect=MockApiException)
+ storagepool_module_mock.perform_module_operation()
+ assert MockStoragePoolApi.create_pool_response('error') in \
+ storagepool_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_volume.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_volume.py
new file mode 100644
index 000000000..1081f8c07
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_volume.py
@@ -0,0 +1,128 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for volume module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_volume_api \
+ import MockVolumeApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+utils.UnityPool = MagicMock()
+utils.UnityPool.get_size_in_gb = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+
+from ansible_collections.dellemc.unity.plugins.modules.volume import Volume
+
+
+class TestUnityVolume():
+
+ get_module_args = MockVolumeApi.VOLUME_MODULE_ARGS
+
+ @pytest.fixture
+ def volume_module_mock(self):
+ volume_module_mock = Volume()
+ volume_module_mock.conn = MagicMock()
+ return volume_module_mock
+
+ def test_create_volume(self, volume_module_mock):
+ self.get_module_args.update({
+ 'vol_name': "Atest",
+ 'pool_name': "Extreme_Perf_tier",
+ 'size': 2,
+ 'cap_unit': "GB",
+ 'is_thin': True,
+ 'compression': True,
+ 'advanced_dedup': True,
+ 'state': 'present'
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.host_access_modify_required = MagicMock(return_value=False)
+ obj_pool = MockSDKObject(MockVolumeApi.pool)
+ volume_object = MockVolumeApi.create_volume_response('api')['volume_details']
+ volume_module_mock.unity_conn.get_pool = MagicMock(return_value=obj_pool)
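+        # get_lun returning None drives the create path; create_lun hands back the canned volume object.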
+ volume_module_mock.unity_conn.get_lun = MagicMock(return_value=None)
+ obj_pool.create_lun = MagicMock(return_value=MockSDKObject(volume_object))
+ volume_module_mock.perform_module_operation()
+ assert volume_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_create_volume_exception(self, volume_module_mock):
+ self.get_module_args.update({
+ 'vol_name': "Atest",
+ 'pool_name': "Extreme_Perf_tier",
+ 'size': 2,
+ 'cap_unit': "GB",
+ 'is_thin': True,
+ 'compression': True,
+ 'advanced_dedup': True,
+ 'state': 'present'
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.host_access_modify_required = MagicMock(return_value=False)
+ obj_pool = MockSDKObject(MockVolumeApi.pool)
+ volume_module_mock.unity_conn.get_pool = MagicMock(return_value=obj_pool)
+ volume_module_mock.unity_conn.get_lun = MagicMock(return_value=None)
+ obj_pool.create_lun = MagicMock(side_effect=MockApiException)
+ volume_module_mock.perform_module_operation()
+ assert MockVolumeApi.create_volume_response('error') in \
+ volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_modify_volume(self, volume_module_mock):
+ self.get_module_args.update({
+ 'vol_name': "Atest",
+ 'pool_name': "Extreme_Perf_tier",
+ 'size': 2,
+ 'cap_unit': "GB",
+ 'is_thin': True,
+ 'compression': True,
+ 'advanced_dedup': False,
+ 'state': 'present'
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.host_access_modify_required = MagicMock(return_value=False)
+ obj_vol = MockSDKObject(MockVolumeApi.modify_volume_response('api')['volume_details'])
+ volume_object = MockVolumeApi.modify_volume_response('api')['volume_details']
+ volume_module_mock.unity_conn.get_lun = MagicMock(return_value=obj_vol)
+ obj_vol.modify = MagicMock(return_value=MockSDKObject(volume_object))
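+        # volume_modify_required is left as a bare (truthy) MagicMock so the module takes the modify path.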
+ volume_module_mock.volume_modify_required = MagicMock()
+ volume_module_mock.get_volume_display_attributes = MagicMock()
+ volume_module_mock.perform_module_operation()
+ assert volume_module_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_modify_volume_exception(self, volume_module_mock):
+ self.get_module_args.update({
+ 'vol_name': "Atest",
+ 'pool_name': "Extreme_Perf_tier",
+ 'size': 2,
+ 'cap_unit': "GB",
+ 'is_thin': True,
+ 'compression': True,
+ 'advanced_dedup': False,
+ 'state': 'present'
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.host_access_modify_required = MagicMock(return_value=False)
+ obj_vol = MockSDKObject(MockVolumeApi.modify_volume_response('api')['volume_details'])
+ volume_module_mock.unity_conn.get_lun = MagicMock(return_value=obj_vol)
+ obj_vol.modify = MagicMock(side_effect=MockApiException)
+ volume_module_mock.volume_modify_required = MagicMock()
+ volume_module_mock.get_volume_display_attributes = MagicMock()
+ volume_module_mock.perform_module_operation()
+ assert MockVolumeApi.modify_volume_response('error') in \
+ volume_module_mock.module.fail_json.call_args[1]['msg']