path: root/ansible_collections/netapp/elementsw
Diffstat (limited to 'ansible_collections/netapp/elementsw')
-rw-r--r--  ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml  210
-rw-r--r--  ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml  100
-rw-r--r--  ansible_collections/netapp/elementsw/.github/workflows/coverage.yml  45
-rw-r--r--  ansible_collections/netapp/elementsw/.github/workflows/main.yml  47
-rw-r--r--  ansible_collections/netapp/elementsw/CHANGELOG.rst  192
-rw-r--r--  ansible_collections/netapp/elementsw/FILES.json  649
-rw-r--r--  ansible_collections/netapp/elementsw/MANIFEST.json  34
-rw-r--r--  ansible_collections/netapp/elementsw/README.md  133
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/changelog.yaml  221
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/config.yaml  32
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml  3
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml  21
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml  7
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml  4
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml  4
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml  2
-rw-r--r--  ansible_collections/netapp/elementsw/meta/runtime.yml  28
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py  51
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py  107
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py  206
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py  225
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py  397
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py  247
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py  340
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py  233
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py  243
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py  154
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py  372
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py  331
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py  206
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py  365
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py  368
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py  272
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py  343
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py  254
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py  423
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py  357
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py  270
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py  369
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py  203
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py  586
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py  274
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py  413
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py  276
-rw-r--r--  ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py  293
-rw-r--r--  ansible_collections/netapp/elementsw/requirements.txt  1
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py  0
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py  33
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/compat/mock.py  122
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py  44
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py  175
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py  245
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py  137
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py  228
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py  157
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py  176
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py  344
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py  201
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py  293
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py  324
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py  300
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py  138
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py  343
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py  364
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py  149
-rw-r--r--  ansible_collections/netapp/elementsw/tests/unit/requirements.txt  1
77 files changed, 13707 insertions, 0 deletions
diff --git a/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..93fbe057a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,210 @@
+---
+name: 🐛 Bug report
+description: Create a report to help us improve
+
+body:
+- type: markdown
+ attributes:
+ value: >
+ **Thank you for wanting to report a bug in netapp.elementsw!**
+
+
+ ⚠
+ Verify first that your issue is not [already reported on
+ GitHub][issue search] and keep in mind that we may have to keep
+ the current behavior because [every change breaks someone's
+ workflow][XKCD 1172].
+ We try to be mindful about this.
+
+ Also test if the latest release and devel branch are affected too.
+
+
+ **Tip:** If you are seeking community support, please consider
+ [joining our Slack community][ML||IRC].
+
+
+
+ [ML||IRC]:
+ https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg
+
+ [issue search]: ../search?q=is%3Aissue&type=issues
+
+ [XKCD 1172]: https://xkcd.com/1172/
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with netapp.elementsw from the devel branch on GitHub, Y
+ breaks in a way Z under the env E. Here are all the details I know
+ about this problem...
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >
+ Write the short name of the rst file, module, plugin, task or
+ feature below, *use your best guess if unsure*.
+
+
+ **Tip:** Cannot find it in this repository? Please be advised that
+ the source for some parts of the documentation are hosted outside
+ of this repository. If the page you are reporting describes
+ modules/plugins/etc that are not officially supported by the
+ Ansible Core Engineering team, there is a good chance that it is
+ coming from one of the [Ansible Collections maintained by the
+ community][collections org]. If this is the case, please make sure
+ to file an issue under the appropriate project there instead.
+
+
+ [collections org]: /ansible-collections
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` below, under
+ the prompt line. Please don't wrap it with triple backticks — your
+ whole input will be turned into a code snippet automatically.
+ render: console
+ value: |
+ $ ansible --version
+ placeholder: |
+ $ ansible --version
+ ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200)
+ config file = None
+ configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
+ ansible python module location = ~/src/github/ansible/ansible/lib/ansible
+ ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections
+ executable location = bin/ansible
+ python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0]
+ jinja version = 2.11.3
+ libyaml = True
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: ElementSW Collection Version
+ description: >-
+ ElementSW Collection Version. Run `ansible-galaxy collection list` and copy the entire output
+ render: console
+ value: |
+ $ ansible-galaxy collection list
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Playbook
+ description: >-
+ The task from the playbook that is giving you the issue
+ render: console
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+ Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ placeholder: |
+ 1. Implement the following playbook:
+
+ ```yaml
+ ---
+ # ping.yml
+ - hosts: all
+ gather_facts: false
+ tasks:
+ - ping:
+ ...
+ ```
+ 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv`
+ 3. An error occurs.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+ I expected X to happen because I assumed Y and was shocked
+ that it did not.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output and don't wrap it with triple backticks — your
+ whole input will be turned into a code snippet automatically.
+ render: console
+ placeholder: >-
+ Certificate did not match expected hostname: files.pythonhosted.org. Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))}
+ Exception:
+ Traceback (most recent call last):
+ File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main
+ status = self.run(options, args)
+ File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run
+ wb.build(autobuilding=True)
+ File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build
+ self.requirement_set.prepare_files(self.finder)
+ File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files
+ ignore_dependencies=self.ignore_dependencies))
+ File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file
+ session=self.session, hashes=hashes)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url
+ hashes=hashes
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url
+ hashes)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url
+ stream=True,
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get
+ return self.request('GET', url, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request
+ return super(PipSession, self).request(method, url, *args, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request
+ resp = self.send(prep, **send_kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send
+ r = adapter.send(request, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send
+ resp = super(CacheControlAdapter, self).send(request, **kw)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send
+ raise SSLError(e, request=request)
+ SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 'www.joyent.com'",),))
+ ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2.
+ ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1.
+ validations:
+ required: true
+
+
+- type: markdown
+ attributes:
+ value: >
+ *One last thing...*
+
+
+ Thank you for your collaboration!
+
+
+...
diff --git a/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..8bb6094c7
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,100 @@
+---
+name: ✨ Feature request
+description: Suggest an idea for this project
+
+body:
+- type: markdown
+ attributes:
+ value: >
+ **Thank you for wanting to suggest a feature for netapp.elementsw!**
+
+ 💡
+ Before you go ahead with your request, please first consider if it
+ would be useful for the majority of netapp.elementsw users. As a
+ general rule of thumb, any feature that is only of interest to a
+ small subgroup should be [implemented in a third-party Ansible
+ Collection][contribute to collections] or maybe even just your
+ project alone. Be mindful of the fact that the essential
+ netapp.elementsw features have a broad impact.
+
+
+ <details>
+ <summary>
+ ❗ Every change breaks someone's workflow.
+ </summary>
+
+
+ [![❗ Every change breaks someone's workflow.
+ ](https://imgs.xkcd.com/comics/workflow.png)
+ ](https://xkcd.com/1172/)
+ </details>
+
+
+ ⚠
+ Verify first that your idea is not [already requested on
+ GitHub][issue search].
+
+ Also test if the main branch does not already implement this.
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: >
+ Describe the new feature/improvement you would like briefly below.
+
+
+ What's the problem this feature will solve?
+
+ What are you trying to do, that you are unable to achieve
+ with netapp.elementsw as it currently stands?
+
+
+ * Provide examples of real-world use cases that this would enable
+ and how it solves the problem you described.
+
+ * How do you solve this now?
+
+ * Have you tried to work around the problem using other tools?
+
+ * Could there be a different approach to solving this issue?
+
+ placeholder: >-
+ I am trying to do X with netapp.elementsw from the devel branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of netapp.elementsw because of Z.
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >
+ Write the short name of the module, plugin, task or feature below,
+ *use your best guess if unsure*.
+
+
+ [collections org]: /ansible-collections
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ placeholder: >-
+ I asked on https://stackoverflow.com/.... and the community
+ advised me to do X, Y and Z.
+ validations:
+ required: true
+
+...
diff --git a/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml b/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml
new file mode 100644
index 000000000..9e2692651
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/.github/workflows/coverage.yml
@@ -0,0 +1,45 @@
+name: NetApp.elementsw Ansible Coverage
+
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 6 * * *'
+
+jobs:
+ sanity:
+ name: Coverage on elementsw
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install ansible stable-2.11
+ run: pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check
+
+ - name: Make directory to make ansible-test happy
+ run: |
+ pwd
+ mkdir -p ansible_collections/netapp/elementsw/
+ rsync -av . ansible_collections/netapp/elementsw/ --exclude ansible_collections/netapp/elementsw/
+
+ - name: Run Unit Tests
+ run: ansible-test units --coverage --color --docker --python 3.8
+ working-directory: ansible_collections/netapp/elementsw/
+
+ # ansible-test supports producing code coverage data
+ - name: Generate coverage report
+ run: ansible-test coverage xml -v --requirements --group-by command --group-by version
+ working-directory: ansible_collections/netapp/elementsw/
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ working-directory: ansible_collections/netapp/elementsw/
+ verbose: true \ No newline at end of file
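The coverage workflow above comes down to two `ansible-test` invocations plus rebuilding the directory layout that ansible-test expects. A minimal sketch of reproducing the Codecov report locally, assuming a checkout of the collection and Docker available (the Ansible version and paths simply mirror the workflow):

```bash
# Sketch only: mirrors the CI steps from coverage.yml.
pip install https://github.com/ansible/ansible/archive/stable-2.11.tar.gz --disable-pip-version-check

# Rebuild the collection path layout, as the workflow does.
mkdir -p ansible_collections/netapp/elementsw/
rsync -av . ansible_collections/netapp/elementsw/ --exclude ansible_collections/netapp/elementsw/
cd ansible_collections/netapp/elementsw/

# Run the unit tests with coverage enabled, then render the XML report that gets uploaded to Codecov.
ansible-test units --coverage --color --docker --python 3.8
ansible-test coverage xml -v --requirements --group-by command --group-by version
```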
diff --git a/ansible_collections/netapp/elementsw/.github/workflows/main.yml b/ansible_collections/netapp/elementsw/.github/workflows/main.yml
new file mode 100644
index 000000000..2b9ec2379
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/.github/workflows/main.yml
@@ -0,0 +1,47 @@
+name: NetApp.elementsw Ansible CI
+
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 6 * * *'
+
+jobs:
+ sanity:
+ name: Sanity (${{ matrix.ansible }}) on Elementsw
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ ansible:
+ - stable-2.9
+ - stable-2.10
+ - stable-2.11
+ - stable-2.12
+ - devel
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install ansible (${{ matrix.ansible }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ - name: Make directory to make ansible-test happy
+ run: |
+ pwd
+ mkdir -p ansible_collections/netapp/elementsw/
+ rsync -av . ansible_collections/netapp/elementsw/ --exclude ansible_collections/netapp/elementsw/
+
+
+ - name: Run sanity tests Elementsw
+ run: ansible-test sanity --docker -v --color
+ working-directory: ansible_collections/netapp/elementsw/
+
+ - name: Run Unit Tests
+ run: ansible-test units --docker -v --color
+ working-directory: ansible_collections/netapp/elementsw/
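The CI workflow repeats the same three steps (install Ansible, rebuild the collection path, run `ansible-test`) for every entry in the version matrix. A sketch of reproducing one matrix cell locally, with `stable-2.12` assumed as the version under test and the `ansible_collections/netapp/elementsw/` layout already built as in the coverage sketch above:

```bash
# Sketch only: one matrix cell of main.yml (sanity + unit tests).
pip install https://github.com/ansible/ansible/archive/stable-2.12.tar.gz --disable-pip-version-check
cd ansible_collections/netapp/elementsw/
ansible-test sanity --docker -v --color
ansible-test units --docker -v --color
```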
diff --git a/ansible_collections/netapp/elementsw/CHANGELOG.rst b/ansible_collections/netapp/elementsw/CHANGELOG.rst
new file mode 100644
index 000000000..a611ba793
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/CHANGELOG.rst
@@ -0,0 +1,192 @@
+=========================================
+NetApp ElementSW Collection Release Notes
+=========================================
+
+.. contents:: Topics
+
+
+v21.7.0
+=======
+
+Minor Changes
+-------------
+
+- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
+
+v21.6.1
+=======
+
+Bugfixes
+--------
+
+- requirements.txt - point to the correct python dependency
+
+v21.3.0
+=======
+
+Minor Changes
+-------------
+
+- na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``.
+- na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS`` as int.
+
+Bugfixes
+--------
+
+- na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``.
+- na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons.
+- na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes')
+
+v20.11.0
+========
+
+Minor Changes
+-------------
+
+- na_elementsw_snapshot_schedule - Add ``retention`` in examples.
+
+Bugfixes
+--------
+
+- na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
+
+v20.10.0
+========
+
+Minor Changes
+-------------
+
+- na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``.
+- na_elementsw_network_interfaces - make all options not required, so that only bond_1g can be set for example.
+- na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options.
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_info - NetApp Element Software Info
+
+v20.9.1
+=======
+
+Bugfixes
+--------
+
+- na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active.
+- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
+
+v20.9.0
+=======
+
+Minor Changes
+-------------
+
+- na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+- na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+- na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID.
+
+Bugfixes
+--------
+
+- na_elementsw_node - fix check_mode so that no action is taken.
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_qos_policy - NetApp Element Software create/modify/rename/delete QOS Policy
+
+v20.8.0
+=======
+
+Minor Changes
+-------------
+
+- add "required:true" where missing.
+- add "type:str" (or int, dict) where missing in documentation section.
+- na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives.
+- remove "required:true" for state and use present as default.
+- use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+
+Bugfixes
+--------
+
+- na_elementsw_access_group - fix check_mode so that no action is taken.
+- na_elementsw_admin_users - fix check_mode so that no action is taken.
+- na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+- na_elementsw_cluster_snmp - double exception because of AttributeError.
+- na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids.
+- na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+- na_elementsw_ldap - double exception because of AttributeError.
+- na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation.
+- na_elementsw_vlan - AttributeError if VLAN already exists.
+- na_elementsw_vlan - change in attributes was ignored.
+- na_elementsw_vlan - fix check_mode so that no action is taken.
+- na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
+- na_elementsw_volume - double exception because of AttributeError.
+
+v20.6.0
+=======
+
+Bugfixes
+--------
+
+- galaxy.yml - fix repository and homepage links.
+
+v20.2.0
+=======
+
+Bugfixes
+--------
+
+- galaxy.yml - fix path to github repository.
+- netapp.py - report error in case of connection error rather than raising a generic exception by default.
+
+v20.1.0
+=======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_access_group_volumes - NetApp Element Software Add/Remove Volumes to/from Access Group
+
+v19.10.0
+========
+
+Minor Changes
+-------------
+
+- refactor existing modules as a collection
+
+v2.8.0
+======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_cluster_config - Configure Element SW Cluster
+- netapp.elementsw.na_elementsw_cluster_snmp - Configure Element SW Cluster SNMP
+- netapp.elementsw.na_elementsw_initiators - Manage Element SW initiators
+
+v2.7.0
+======
+
+New Modules
+-----------
+
+- netapp.elementsw.na_elementsw_access_group - NetApp Element Software Manage Access Groups
+- netapp.elementsw.na_elementsw_account - NetApp Element Software Manage Accounts
+- netapp.elementsw.na_elementsw_admin_users - NetApp Element Software Manage Admin Users
+- netapp.elementsw.na_elementsw_backup - NetApp Element Software Create Backups
+- netapp.elementsw.na_elementsw_check_connections - NetApp Element Software Check connectivity to MVIP and SVIP.
+- netapp.elementsw.na_elementsw_cluster - NetApp Element Software Create Cluster
+- netapp.elementsw.na_elementsw_cluster_pair - NetApp Element Software Manage Cluster Pair
+- netapp.elementsw.na_elementsw_drive - NetApp Element Software Manage Node Drives
+- netapp.elementsw.na_elementsw_ldap - NetApp Element Software Manage ldap admin users
+- netapp.elementsw.na_elementsw_network_interfaces - NetApp Element Software Configure Node Network Interfaces
+- netapp.elementsw.na_elementsw_node - NetApp Element Software Node Operation
+- netapp.elementsw.na_elementsw_snapshot - NetApp Element Software Manage Snapshots
+- netapp.elementsw.na_elementsw_snapshot_restore - NetApp Element Software Restore Snapshot
+- netapp.elementsw.na_elementsw_snapshot_schedule - NetApp Element Software Snapshot Schedules
+- netapp.elementsw.na_elementsw_vlan - NetApp Element Software Manage VLAN
+- netapp.elementsw.na_elementsw_volume - NetApp Element Software Manage Volumes
+- netapp.elementsw.na_elementsw_volume_clone - NetApp Element Software Create Volume Clone
+- netapp.elementsw.na_elementsw_volume_pair - NetApp Element Software Volume Pair
diff --git a/ansible_collections/netapp/elementsw/FILES.json b/ansible_collections/netapp/elementsw/FILES.json
new file mode 100644
index 000000000..7113c56bd
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/FILES.json
@@ -0,0 +1,649 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bd0735ea0d7847ed0f372da0cf7d7f8a0a2471aec49b5c16901d1c32793e43e",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd42778f85cd3b989604d0227af4cc90350d94f5864938eb0bd29cf7a66401c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc9a4b7d4d77cf221f256e5972707d08f424f319b856ef4a8fdd0dbe9a3dc322",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a98ea2d0aec17e10c6b5a956cfaa1dcddbd336b674079a1f86e85429381a49e7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp_elementsw_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33132c95ba546d56bf953e1613dd39ad8a258379b3a32120f7be8b19e2c0d8a2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_initiators.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a0e280ee9ef13b994f98c848524dc53b3a3a16559e3d1e22be6573272327c8c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_qos_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4934c116271845de9f5da2f9747042601e961bc929f3a22397961313b3888e06",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_cluster_snmp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ee85a0b9e6ac2b0151a52b7722a43ea3e358d48f48816f5fac597151fd58d93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25b0f4b869b1b814160da50df5b7b06d0e5d3eb83ca8887a0fead337699d6c62",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4b329b6f3c13f500a95ad0fb40eba4db5873b78b0c137997c858229336011af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_access_group_volumes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "532fbf39ed0ee98af0e9323f037ab0e0f52d5eac9179a82eeb169a5a48cdfd3e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_snapshot_schedule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a07aa78ae73ec965592b77bad72bbedd724b519e82f51805d5fd414d3f9c414",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_node.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6882747383c770c6ec43585e3a4db0081c8de165415d40941532324208e3aa4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_access_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7099bfffb1ec35ed7c0a40c0708cb4d1d79f6267b16fcc71f759796add15edaf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_cluster_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddd54266eb0a3ebf891d8c1310059b40cfbad7679db3d7f2b9c600baf31e42ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_volume_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ead937f30287dfd02521b4fdda1e0a128cd1d3ba8db4a721330ff4bbfb76e284",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_cluster_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dc94b752a4931e30ea169f61aec3919a7cd7636ce3aeff4764094d2adc355f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "983415a406d31e2edd3e06b64745363e0d1c5ee7575058298bfdce6919522e31",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b8a59c8c45c1aa147c2d90b01654135f31ac4a1e31c643ce3b07007d6f28ea9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39414c4cb613271d96220d275f027404e41e4b5dd61db5c7ad6eb3f70bc3243b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d42be06f947c782d42fdd9141daeb87374855fc996ecfc53a450e20216cc6e05",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_volume_clone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05f518bb36b88476c0a6dc329587400937c88c64bb335bd0f3ad279c79cf845e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_check_connections.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54458477eb0807256e663f64924d88cf5a5cb8058c0e7212a155a4aff9f87997",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_drive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d7a53bf79e58150eff5f6979890afb54a6859597121a4cee0e7b4e6020f0eb0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_account.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7dbfc7b05e3c69ebbb1723314094d62e07a4b328cba09db899808fd50d38bc15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_snapshot_restore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d70395bc1a83498c08081aaa31fa4e5bb8ebfccbc03b7c9f1cb0aa6a4d132c9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b545334782c314c7c2c8e857f85838859b461176369ed002f3fba7414062b809",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_network_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d045d9768f1b469c3aeda533dbfdcbdb5a2f51a2d9949c59a3f73b56959ca082",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_elementsw_admin_users.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b822e729b9e40361b148fd9739fddf1c26705597a092b5d967e29676eed9fb66",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/builtins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da9e4399d51f4aa7e39d11a4c8adb3ea291252334eeebc6e5569777c717739da",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66d9f46f9b572b24f6465f43d2aebfb43f3fe2858ad528472559ba089dc2fb3c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6aa0100e51bbe54b6e9edeb072b7de526542e55da1cede0d1ae5f4367ec89eb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d910be3c377edddb04f6f74c3e4908a9d6d32c71ec251cf74e9eaa6711b1bffe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9390907ec097add3aa2d936dd95f63d05bfac2b5b730ae12df50d14c5a18e0c1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b563b9adab2f4c7a67354fa2b7a2e3468cf68b041ba51c788e0e082e4b50b7ba",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae4c8e648a16dfa704964ef0f3782ea27adec2f1c0ceb5fca84ab86e888caffa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_qos_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "189242c5691fba4c436403cbfeb512fdab01c8bd35b028d7262b4cdeca9c7376",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_account.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5002081bc3177a94e5b2911259138ba80b2cf03006c6333c78cc50731f89fbbe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_initiators.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5cc8b59e5120ff8f6b51a9b2085d336f63c5b91d7d3f21db629176c92c2f011",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "655c454425b97c72bb924b5def11e8dc65dd9dc4cd40cf00df66ae85120ba40f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb1802b2cd87193966ccc7d8b0c6c94522d7954bfada73febb8aeae77367322c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "489f21207a0de4f7ab263096c0f2d2c674cb9a334b45edb76165f7a933b13c5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_access_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4682bf1c6d258032a9a9b001254246a2993e006ab2aa32463e42bed5e192e09f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_elementsw_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a138bc7c455af917d85a69c4e010ae92cda34cff767fe7d0514806ab82d22b0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules_utils/test_netapp_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a40d8651793b9771d6f56d5e8b52772597a77e317002a9f9bf3400cffd014d60",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a2a08d11b2cf3859e796da8a7928461df41efdd14abbc7e4234a37da5ca19c4",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3734.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "419f9e02843f2fc7b584c8d3a4160769b1939784dbc0f726c55daeca0bc6bef9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3324.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "784b39c5d9440affb1dbab3ba8769ec1e88e7570798448c238a77d32dbf6e505",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/20.9.0.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56bed0aab9696af7068eb1bb743eb316ab23c3200ac6faa715a303e5f33f0973",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3196.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94573d6e6ddde5f8a053d72a7e49d87d13c4274f5ea5c24c6c0a95947215977b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3800.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fc0ea3ba25f76222015eba223c4a88c7d36b52cb5d767a5c3a9374746532a5e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3733.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a1ce243b30c79588a96fac9c050487d9b9ea63208e9c30934b7af77cc24dfe4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/2019.10.0.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b1a5ef7df5f1e6e66ddc013149aea0480eb79f911a0563e2e6d7d9af79d5572",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3174.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cfc4addbf3343a3ce121f5de6cc2cc8244ad7b62a7429c2694543dabc2a8ccf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/20.2.0.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c98764e792ed6c6d9cee6df80b9fff8f4fcadaf765c0aa0f0ed3dd5e3080fec",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3117.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "242f770eafb49994810a3263e23e1d342aeb36396819045c48f491810aab6908",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3731.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f92782e45a47a3439f8a858c3f283879fdc070422109d5a9ab2fdaa7ca56293",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3310.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8132aa931d13a49ba1a3c0fee131c048c6767ce17b3d9cabafa7e34f3c7c239a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3235.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cddb1135b1c15ca3c8f130bcc439d73ac819c7a3e0472c9ff358c75405bd8cb3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/20.8.0.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b13007f7b14dd35357ec0fb06b0e89cf5fee56036b0a6004dfb21c46010cb7c1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3188.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0efa05e4cca58b1bfe30a60673adc266e7598d841065486b5b29c7e7a8b29bf4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/20.6.0.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6192b3cccdc7c1e1eb0d61a49dd20c6f234499b6dd9b52b2f974b673e99f7a47",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4416.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70f470630a3fb893540ad9060634bfd0955e4a3371ab1a921e44bdc6b5ea1ba5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad8dbe639e83e6feef631362bf2d78cde3c51c093203c0de8113b0d1cbc7756d",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ada0df4adf6ff17cdb5493e6050ec750fa13347ea71a6122a7e139f65f842b50",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fef29bf470c1567ed5ba3e3d5f227d21db4d23455c4fd12628e3e3ad80ddd76",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "140dc9b99f730080720586330df5ee7ef8f5e74b5898738d2b269ac52bbe4666",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c8be00f495b1a0e20d3e4c2bca809b9eda7d2ab92e838bfad951dfa37e7b3d2",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b59987ccd30474cf321e36496cc8b30464bdd816c5b3860d659356bc3e2a2a7f",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1818f97ced0b9d61cd4d65742e14cb618a333be7f734c1fee8bb420323e5373d",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp/elementsw/MANIFEST.json b/ansible_collections/netapp/elementsw/MANIFEST.json
new file mode 100644
index 000000000..fda95d344
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/MANIFEST.json
@@ -0,0 +1,34 @@
+{
+ "collection_info": {
+ "namespace": "netapp",
+ "name": "elementsw",
+ "version": "21.7.0",
+ "authors": [
+ "NetApp Ansible Team <ng-ansibleteam@netapp.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "storage",
+ "netapp",
+ "solidfire"
+ ],
+ "description": "Netapp ElementSW (Solidfire) Collection",
+ "license": [
+ "GPL-2.0-or-later"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/netapp.elementsw",
+ "documentation": null,
+ "homepage": "https://netapp.io/configuration-management-and-automation/",
+ "issues": "https://github.com/ansible-collections/netapp.elementsw/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "472a7d73c3fe2719a7c500eadc92b8f89ca852d2c5aee2b71d7afb688c97dc8c",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp/elementsw/README.md b/ansible_collections/netapp/elementsw/README.md
new file mode 100644
index 000000000..96b62e64d
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/README.md
@@ -0,0 +1,133 @@
+[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/elementsw/index.html)
+![example workflow](https://github.com/ansible-collections/netapp.elementsw/actions/workflows/main.yml/badge.svg)
+[![codecov](https://codecov.io/gh/ansible-collections/netapp.elementsw/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.elementsw)
+
+
+netapp.elementSW
+
+NetApp ElementSW Collection
+
+Copyright (c) 2019 NetApp, Inc. All rights reserved.
+Specifications subject to change without notice.
+
+# Installation
+```bash
+ansible-galaxy collection install netapp.elementsw
+```
+To use this collection, add the following to the top of your playbook; without it, you will be using the Ansible 2.9 version of the modules.
+```yaml
+collections:
+ - netapp.elementsw
+```
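A minimal usage sketch follows. The connection options come from the collection's shared documentation fragment; the variable names are placeholders, and any module from the collection can be substituted:

```yaml
---
# Sketch only: placeholder variables; na_elementsw_check_connections is used
# here because it needs nothing beyond the shared connection options.
- hosts: localhost
  gather_facts: false
  collections:
    - netapp.elementsw
  tasks:
    - name: Check connectivity to MVIP and SVIP
      na_elementsw_check_connections:
        hostname: "{{ elementsw_mvip }}"
        username: "{{ elementsw_username }}"
        password: "{{ elementsw_password }}"
```

Using the fully qualified name `netapp.elementsw.na_elementsw_check_connections` in the task works as well and does not require the `collections:` keyword.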
+
+# Module documentation
+https://docs.ansible.com/ansible/devel/collections/netapp/elementsw/
+
+# Need help
+Join our Slack Channel at [Netapp.io](http://netapp.io/slack)
+
+# Release Notes
+
+## 21.7.0
+
+### Minor changes
+ - all modules - enable usage of Ansible module group defaults - for Ansible 2.12+.
+
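A hedged sketch of the module group defaults mentioned above (Ansible 2.12+ only; the action group name `netapp.elementsw.netapp_elementsw` is an assumption based on the collection's `meta/runtime.yml`, so verify it against your installed copy):

```yaml
# Sketch only: assumed group name; credential variables are placeholders.
- hosts: localhost
  gather_facts: false
  module_defaults:
    group/netapp.elementsw.netapp_elementsw:
      hostname: "{{ elementsw_mvip }}"
      username: "{{ elementsw_username }}"
      password: "{{ elementsw_password }}"
  tasks:
    - name: Credentials are inherited from module_defaults
      netapp.elementsw.na_elementsw_check_connections:
```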
+## 21.6.1
+### Bug Fixes
+ - requirements.txt: point to the correct python dependency
+
+## 21.3.0
+
+### New Options
+ - na_elementsw_qos_policy: explicitly define `minIOPS`, `maxIOPS`, `burstIOPS` as int.
+
+### Minor changes
+ - na_elementsw_info - add `cluster_nodes` and `cluster_drives`.
+
+### Bug Fixes
+ - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``.
+ - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons.
+ - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes')
+
+## 20.11.0
+
+### Minor changes
+- na_elementsw_snapshot_schedule - Add `retention` in examples.
+
+### Bug Fixes
+- na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
+
+## 20.10.0
+
+### New Modules
+- na_elementsw_info: support for two subsets `cluster_accounts`, `node_config`.
+
+### New Options
+- na_elementsw_cluster: `encryption` to enable encryption at rest. `order_number` and `serial_number` for demo purposes.
+- na_elementsw_network_interfaces: restructure options, into 2 dictionaries `bond_1g` and `bond_10g`, so that there is no shared option. Disallow all older options.
+- na_elementsw_network_interfaces: make all options not required, so that only bond_1g can be set for example.
+
+## 20.9.1
+
+### Bug Fixes
+- na_elementsw_node: improve error reporting when cluster name cannot be set because node is already active.
+- na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
+
+## 20.9.0
+
+### New Modules
+- na_elementsw_qos_policy: create, modify, rename, or delete QOS policy.
+
+### New Options
+- na_elementsw_node: `cluster_name` to set the cluster name on new nodes.
+- na_elementsw_node: `preset_only` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+- na_elementsw_volume: `qos_policy_name` to provide a QOS policy name or ID.
+
+### Bug Fixes
+- na_elementsw_node: fix check_mode so that no action is taken.
+
+## 20.8.0
+
+### New Options
+- na_elementsw_drive: add all drives in a cluster, allow for a list of nodes or a list of drives.
+
+### Bug Fixes
+- na_elementsw_access_group: fix check_mode so that no action is taken.
+- na_elementsw_admin_users: fix check_mode so that no action is taken.
+- na_elementsw_cluster: create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+- na_elementsw_cluster_snmp: double exception because of AttributeError.
+- na_elementsw_drive: node_id or drive_id were not handled properly when using numeric ids.
+- na_elementsw_initiators: volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+- na_elementsw_ldap: double exception because of AttributeError.
+- na_elementsw_snapshot_schedule: ignore schedules being deleted (idempotency), remove default values and fix documentation.
+- na_elementsw_vlan: AttributeError if VLAN already exists.
+- na_elementsw_vlan: fix check_mode so that no action is taken.
+- na_elementsw_vlan: change in attributes was ignored.
+- na_elementsw_volume: double exception because of AttributeError.
+- na_elementsw_volume: Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
+
+### Module documentation changes
+- use a three group format for `version_added`. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+- add type: str (or int, dict) where missing in documentation section.
+- add required: true where missing.
+- remove required: true for state and use present as default.
+
+## 20.6.0
+### Bug Fixes
+- galaxy.yml: fix repository and homepage links
+
+## 20.2.0
+### Bug Fixes
+- galaxy.yml: fix path to github repository.
+- netapp.py: report error in case of connection error rather than raising a generic exception by default.
+
+## 20.1.0
+### New Module
+- na_elementsw_access_group_volumes: add/remove volumes to/from existing access group
+
+## 19.11.0
+## 19.10.0
+Changes in 19.10.0 and September collection releases compared to Ansible 2.9
+### Documentation Fixes:
+- na_elementsw_drive: na_elementsw_drive was documented as na_element_drive
diff --git a/ansible_collections/netapp/elementsw/changelogs/changelog.yaml b/ansible_collections/netapp/elementsw/changelogs/changelog.yaml
new file mode 100644
index 000000000..97d921301
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/changelog.yaml
@@ -0,0 +1,221 @@
+ancestor: null
+releases:
+ 19.10.0:
+ changes:
+ minor_changes:
+ - refactor existing modules as a collection
+ fragments:
+ - 2019.10.0.yaml
+ release_date: '2019-11-14'
+ 2.7.0:
+ modules:
+ - description: NetApp Element Software Manage Access Groups
+ name: na_elementsw_access_group
+ namespace: ''
+ - description: NetApp Element Software Manage Accounts
+ name: na_elementsw_account
+ namespace: ''
+ - description: NetApp Element Software Manage Admin Users
+ name: na_elementsw_admin_users
+ namespace: ''
+ - description: NetApp Element Software Create Backups
+ name: na_elementsw_backup
+ namespace: ''
+ - description: NetApp Element Software Check connectivity to MVIP and SVIP.
+ name: na_elementsw_check_connections
+ namespace: ''
+ - description: NetApp Element Software Create Cluster
+ name: na_elementsw_cluster
+ namespace: ''
+ - description: NetApp Element Software Manage Cluster Pair
+ name: na_elementsw_cluster_pair
+ namespace: ''
+ - description: NetApp Element Software Manage Node Drives
+ name: na_elementsw_drive
+ namespace: ''
+ - description: NetApp Element Software Manage ldap admin users
+ name: na_elementsw_ldap
+ namespace: ''
+ - description: NetApp Element Software Configure Node Network Interfaces
+ name: na_elementsw_network_interfaces
+ namespace: ''
+ - description: NetApp Element Software Node Operation
+ name: na_elementsw_node
+ namespace: ''
+ - description: NetApp Element Software Manage Snapshots
+ name: na_elementsw_snapshot
+ namespace: ''
+ - description: NetApp Element Software Restore Snapshot
+ name: na_elementsw_snapshot_restore
+ namespace: ''
+ - description: NetApp Element Software Snapshot Schedules
+ name: na_elementsw_snapshot_schedule
+ namespace: ''
+ - description: NetApp Element Software Manage VLAN
+ name: na_elementsw_vlan
+ namespace: ''
+ - description: NetApp Element Software Manage Volumes
+ name: na_elementsw_volume
+ namespace: ''
+ - description: NetApp Element Software Create Volume Clone
+ name: na_elementsw_volume_clone
+ namespace: ''
+ - description: NetApp Element Software Volume Pair
+ name: na_elementsw_volume_pair
+ namespace: ''
+ release_date: '2018-09-21'
+ 2.8.0:
+ modules:
+ - description: Configure Element SW Cluster
+ name: na_elementsw_cluster_config
+ namespace: ''
+ - description: Configure Element SW Cluster SNMP
+ name: na_elementsw_cluster_snmp
+ namespace: ''
+ - description: Manage Element SW initiators
+ name: na_elementsw_initiators
+ namespace: ''
+ release_date: '2019-04-11'
+ 20.1.0:
+ modules:
+ - description: NetApp Element Software Add/Remove Volumes to/from Access Group
+ name: na_elementsw_access_group_volumes
+ namespace: ''
+ release_date: '2020-01-08'
+ 20.10.0:
+ changes:
+ minor_changes:
+ - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and
+ ``serial_number``.
+ - na_elementsw_network_interfaces - make all options not required, so that only
+ bond_1g can be set for example.
+ - na_elementsw_network_interfaces - restructure options into 2 dictionaries
+ ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow
+ all older options.
+ fragments:
+ - DEVOPS-3117.yaml
+ - DEVOPS-3196.yaml
+ - DEVOPS-3235.yaml
+ modules:
+ - description: NetApp Element Software Info
+ name: na_elementsw_info
+ namespace: ''
+ release_date: '2020-10-08'
+ 20.11.0:
+ changes:
+ bugfixes:
+ - na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
+ minor_changes:
+ - na_elementsw_snapshot_schedule - Add ``retention`` in examples.
+ fragments:
+ - DEVOPS-3310.yml
+ - DEVOPS-3324.yaml
+ release_date: '2020-11-05'
+ 20.2.0:
+ changes:
+ bugfixes:
+ - galaxy.yml - fix path to github repository.
+ - netapp.py - report error in case of connection error rather than raising a
+ generic exception by default.
+ fragments:
+ - 20.2.0.yaml
+ release_date: '2020-02-05'
+ 20.6.0:
+ changes:
+ bugfixes:
+ - galaxy.yml - fix repository and homepage links.
+ fragments:
+ - 20.6.0.yaml
+ release_date: '2020-06-03'
+ 20.8.0:
+ changes:
+ bugfixes:
+ - na_elementsw_access_group - fix check_mode so that no action is taken.
+ - na_elementsw_admin_users - fix check_mode so that no action is taken.
+ - na_elementsw_cluster - create cluster if it does not exist. Do not expect
+ MVIP or SVIP to exist before create.
+ - na_elementsw_cluster_snmp - double exception because of AttributeError.
+ - na_elementsw_drive - node_id or drive_id were not handled properly when using
+ numeric ids.
+ - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups
+ was ignored and redundant.
+ - na_elementsw_ldap - double exception because of AttributeError.
+ - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency),
+ remove default values and fix documentation.
+ - na_elementsw_vlan - AttributeError if VLAN already exists.
+ - na_elementsw_vlan - change in attributes was ignored.
+ - na_elementsw_vlan - fix check_mode so that no action is taken.
+ - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid
+ python identifier - renamed to enable512emulation.
+ - na_elementsw_volume - double exception because of AttributeError.
+ minor_changes:
+ - add "required:true" where missing.
+ - add "type:str" (or int, dict) where missing in documentation section.
+ - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes
+ or a list of drives.
+ - remove "required:true" for state and use present as default.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same
+ thing for 2.8 and 2.9.
+ fragments:
+ - 20.8.0.yaml
+ release_date: '2020-08-05'
+ 20.9.0:
+ changes:
+ bugfixes:
+ - na_elementsw_node - fix check_mode so that no action is taken.
+ minor_changes:
+ - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+ - na_elementsw_node - ``preset_only`` to only set the cluster name before creating
+ a cluster with na_elementsw_cluster.
+ - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or
+ ID.
+ fragments:
+ - 20.9.0.yaml
+ modules:
+ - description: NetApp Element Software create/modify/rename/delete QOS Policy
+ name: na_elementsw_qos_policy
+ namespace: ''
+ release_date: '2020-09-02'
+ 20.9.1:
+ changes:
+ bugfixes:
+ - na_elementsw_node - improve error reporting when cluster name cannot be set
+ because node is already active.
+ - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo
+ have been added back
+ fragments:
+ - DEVOPS-3174.yaml
+ - DEVOPS-3188.yaml
+ release_date: '2020-09-08'
+ 21.3.0:
+ changes:
+ bugfixes:
+      - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync``
+ and ``force_during_upgrade``.
+ - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS`
+ to str, causing type mismatch issues in comparisons.
+ - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo'
+ object has no attribute 'minutes')
+ minor_changes:
+ - na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``.
+ - na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS``
+ as int.
+ fragments:
+ - DEVOPS-3731.yaml
+ - DEVOPS-3733.yaml
+ - DEVOPS-3734.yaml
+ release_date: '2021-03-03'
+ 21.6.1:
+ changes:
+ bugfixes:
+ - requirements.txt - point to the correct python dependency
+ fragments:
+ - DEVOPS-3800.yaml
+ release_date: '2021-05-18'
+ 21.7.0:
+ changes:
+ minor_changes:
+ - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
+ fragments:
+ - DEVOPS-4416.yaml
+ release_date: '2021-11-03'
diff --git a/ansible_collections/netapp/elementsw/changelogs/config.yaml b/ansible_collections/netapp/elementsw/changelogs/config.yaml
new file mode 100644
index 000000000..2d637df5c
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: NetApp ElementSW Collection
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml
new file mode 100644
index 000000000..832b5f56f
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.2.0.yaml
@@ -0,0 +1,3 @@
+bugfixes:
+ - galaxy.yml - fix path to github repository.
+ - netapp.py - report error in case of connection error rather than raising a generic exception by default.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml
new file mode 100644
index 000000000..fcd0d11ee
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.6.0.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - galaxy.yml - fix repository and homepage links.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml
new file mode 100644
index 000000000..5c959531a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.8.0.yaml
@@ -0,0 +1,21 @@
+minor_changes:
+ - na_elementsw_drive - add all drives in a cluster, allow for a list of nodes or a list of drives.
+ - use a three group format for ``version_added``. So 2.7 becomes 2.7.0. Same thing for 2.8 and 2.9.
+ - add "type:str" (or int, dict) where missing in documentation section.
+ - add "required:true" where missing.
+ - remove "required:true" for state and use present as default.
+
+bugfixes:
+ - na_elementsw_access_group - fix check_mode so that no action is taken.
+ - na_elementsw_admin_users - fix check_mode so that no action is taken.
+ - na_elementsw_cluster - create cluster if it does not exist. Do not expect MVIP or SVIP to exist before create.
+ - na_elementsw_cluster_snmp - double exception because of AttributeError.
+ - na_elementsw_drive - node_id or drive_id were not handled properly when using numeric ids.
+ - na_elementsw_initiators - volume_access_group_id was ignored. volume_access_groups was ignored and redundant.
+ - na_elementsw_ldap - double exception because of AttributeError.
+ - na_elementsw_snapshot_schedule - ignore schedules being deleted (idempotency), remove default values and fix documentation.
+ - na_elementsw_vlan - AttributeError if VLAN already exists.
+ - na_elementsw_vlan - fix check_mode so that no action is taken.
+ - na_elementsw_vlan - change in attributes was ignored.
+ - na_elementsw_volume - double exception because of AttributeError.
+ - na_elementsw_volume - Argument '512emulation' in argument_spec is not a valid python identifier - renamed to enable512emulation.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml
new file mode 100644
index 000000000..a406c9c2d
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/20.9.0.yaml
@@ -0,0 +1,7 @@
+minor_changes:
+ - na_elementsw_node - ``cluster_name`` to set the cluster name on new nodes.
+ - na_elementsw_node - ``preset_only`` to only set the cluster name before creating a cluster with na_elementsw_cluster.
+ - na_elementsw_volume - ``qos_policy_name`` to provide a QOS policy name or ID.
+
+bugfixes:
+ - na_elementsw_node - fix check_mode so that no action is taken.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml
new file mode 100644
index 000000000..5723daa11
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/2019.10.0.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - refactor existing modules as a collection
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml
new file mode 100644
index 000000000..23a6cafa4
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3117.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_cluster - add new options ``encryption``, ``order_number``, and ``serial_number``.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml
new file mode 100644
index 000000000..01e754719
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3174.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_elementsw_node - improve error reporting when cluster name cannot be set because node is already active.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml
new file mode 100644
index 000000000..ad5d8bee7
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3188.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+  - na_elementsw_schedule - missing imports TimeIntervalFrequency, Schedule, ScheduleInfo have been added back
\ No newline at end of file
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml
new file mode 100644
index 000000000..21a70b02c
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3196.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_network_interfaces - make all options not required, so that only bond_1g can be set for example.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml
new file mode 100644
index 000000000..8a2f82f34
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3235.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_network_interfaces - restructure options into 2 dictionaries ``bond_1g`` and ``bond_10g``, so that there is no shared option. Disallow all older options.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml
new file mode 100644
index 000000000..729e6d062
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3310.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_elementsw_snapshot_schedule - Add ``retention`` in examples.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml
new file mode 100644
index 000000000..b87e308d8
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3324.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_elementsw_drive - Object of type 'dict_values' is not JSON serializable.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml
new file mode 100644
index 000000000..a4e43ed45
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3731.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_elementsw_qos_policy - explicitly define ``minIOPS``, ``maxIOPS``, ``burstIOPS`` as int.
+bugfixes:
+ - na_elementsw_qos_policy - loop would convert `minIOPS`, `maxIOPS`, `burstIOPS` to str, causing type mismatch issues in comparisons.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml
new file mode 100644
index 000000000..7310f3b75
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3733.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_elementsw_info - add ``cluster_nodes`` and ``cluster_drives``.
+bugfixes:
+  - na_elementsw_drive - latest SDK does not accept ``force_during_bin_sync`` and ``force_during_upgrade``.
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml
new file mode 100644
index 000000000..08c5bf552
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3734.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_elementsw_snapshot_schedule - change of interface in SDK ('ScheduleInfo' object has no attribute 'minutes')
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml
new file mode 100644
index 000000000..b6e57d046
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-3800.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - requirements.txt - point to the correct python dependency
diff --git a/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml
new file mode 100644
index 000000000..6b4b660a0
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/changelogs/fragments/DEVOPS-4416.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
diff --git a/ansible_collections/netapp/elementsw/meta/runtime.yml b/ansible_collections/netapp/elementsw/meta/runtime.yml
new file mode 100644
index 000000000..05a30f02f
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/meta/runtime.yml
@@ -0,0 +1,28 @@
+---
+requires_ansible: ">=2.9.10"
+action_groups:
+ netapp_elementsw:
+ - na_elementsw_access_group
+ - na_elementsw_access_group_volumes
+ - na_elementsw_account
+ - na_elementsw_admin_users
+ - na_elementsw_backup
+ - na_elementsw_check_connections
+ - na_elementsw_cluster_config
+ - na_elementsw_cluster_pair
+ - na_elementsw_cluster
+ - na_elementsw_cluster_snmp
+ - na_elementsw_drive
+ - na_elementsw_info
+ - na_elementsw_initiators
+ - na_elementsw_ldap
+ - na_elementsw_network_interfaces
+ - na_elementsw_node
+ - na_elementsw_qos_policy
+ - na_elementsw_snapshot
+ - na_elementsw_snapshot_restore
+ - na_elementsw_snapshot_schedule
+ - na_elementsw_vlan
+ - na_elementsw_volume_clone
+ - na_elementsw_volume_pair
+ - na_elementsw_volume
diff --git a/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py
new file mode 100644
index 000000000..229d03f7d
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/doc_fragments/netapp.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for SolidFire
+ SOLIDFIRE = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the SolidFire cluster.
+ - For na_elementsw_cluster, the Management IP (MIP) or hostname of the node to initiate the cluster creation from.
+ type: str
+ username:
+ required: true
+ description:
+ - Please ensure that the user has the adequate permissions. For more information, please read the official documentation
+ U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
+ aliases: ['user']
+ type: str
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+ type: str
+
+requirements:
+ - The modules were developed with SolidFire 10.1
+ - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
+
+notes:
+ - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
+
+'''
diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py
new file mode 100644
index 000000000..4121bf8e7
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp.py
@@ -0,0 +1,107 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''
+Common methods and constants
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+# uncomment this to log API calls
+# import logging
+
+try:
+ from solidfire.factory import ElementFactory
+ import solidfire.common
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+COLLECTION_VERSION = "21.7.0"
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def ontap_sf_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True)
+ )
+
+
+def create_sf_connection(module, hostname=None, port=None, raise_on_connection_error=False, timeout=None):
+ if hostname is None:
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ options = dict()
+ if port is not None:
+ options['port'] = port
+ if timeout is not None:
+ options['timeout'] = timeout
+
+ if not HAS_SF_SDK:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+ try:
+ logging.basicConfig(filename='/tmp/elementsw_apis.log', level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
+ except NameError:
+ # logging was not imported
+ pass
+
+ try:
+ return_val = ElementFactory.create(hostname, username, password, **options)
+ except (solidfire.common.ApiConnectionError, solidfire.common.ApiServerError) as exc:
+ if raise_on_connection_error:
+ raise exc
+ module.fail_json(msg=repr(exc))
+ except Exception as exc:
+ raise Exception("Unable to create SF connection: %s" % repr(exc))
+ return return_val
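As an illustration only (not part of the patch), a minimal sketch of how a module consumes these helpers; the `name` option and the module body are hypothetical:

```python
# Hedged sketch: build the common argument spec, check for the SDK, then connect.
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils


def main():
    argument_spec = netapp_utils.ontap_sf_host_argument_spec()
    argument_spec.update(dict(name=dict(required=True, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not netapp_utils.has_sf_sdk():
        module.fail_json(msg="Unable to import the SolidFire Python SDK")

    # By default, connection errors are reported through module.fail_json();
    # pass raise_on_connection_error=True to let the caller handle them instead.
    sfe = netapp_utils.create_sf_connection(module=module)
    module.exit_json(changed=False, msg="connected to %s" % module.params['hostname'])


if __name__ == '__main__':
    main()
```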
diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py
new file mode 100644
index 000000000..2d8b92cfa
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_elementsw_module.py
@@ -0,0 +1,206 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Copyright: (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+
+HAS_SF_SDK = False
+try:
+ import solidfire.common
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+class NaElementSWModule(object):
+ ''' Support class for common or shared functions '''
+ def __init__(self, elem):
+ self.elem_connect = elem
+ self.parameters = dict()
+
+ def get_volume(self, volume_id):
+ """
+ Return volume details if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume dict if found, None if not found
+ :rtype: dict
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume
+ return None
+
+ def get_volume_id(self, vol_name, account_id):
+ """
+ Return volume id from the given (valid) account_id if found
+ Return None if not found
+
+ :param vol_name: Name of the volume
+ :type vol_name: str
+ :param account_id: Account ID
+ :type account_id: int
+
+ :return: Volume ID of the first matching volume if found. None if not found.
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes_for_account(account_id=account_id)
+ for volume in volume_list.volumes:
+ if volume.name == vol_name:
+ # return volume_id
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_id_exists(self, volume_id):
+ """
+ Return volume_id if volume exists for given volume_id
+
+ :param volume_id: volume ID
+ :type volume_id: int
+ :return: Volume ID if found, None if not found
+ :rtype: int
+ """
+ volume_list = self.elem_connect.list_volumes(volume_ids=[volume_id])
+ for volume in volume_list.volumes:
+ if volume.volume_id == volume_id:
+ if str(volume.delete_time) == "":
+ return volume.volume_id
+ return None
+
+ def volume_exists(self, volume, account_id):
+ """
+ Return volume_id if exists, None if not found
+
+ :param volume: Volume ID or Name
+ :type volume: str
+ :param account_id: Account ID (valid)
+ :type account_id: int
+ :return: Volume ID if found, None if not found
+ """
+ # If volume is an integer, get_by_id
+ if str(volume).isdigit():
+ volume_id = int(volume)
+ try:
+ if self.volume_id_exists(volume_id):
+ return volume_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get volume by name
+ volume_id = self.get_volume_id(volume, account_id)
+ return volume_id
+
+ def get_snapshot(self, snapshot_id, volume_id):
+ """
+ Return snapshot details if found
+
+ :param snapshot_id: Snapshot ID or Name
+ :type snapshot_id: str
+ :param volume_id: Account ID (valid)
+ :type volume_id: int
+ :return: Snapshot dict if found, None if not found
+ :rtype: dict
+ """
+ # mandate src_volume_id although not needed by sdk
+ snapshot_list = self.elem_connect.list_snapshots(
+ volume_id=volume_id)
+ for snapshot in snapshot_list.snapshots:
+ # if actual id is provided
+ if str(snapshot_id).isdigit() and snapshot.snapshot_id == int(snapshot_id):
+ return snapshot
+ # if snapshot name is provided
+ elif snapshot.name == snapshot_id:
+ return snapshot
+ return None
+
+ @staticmethod
+ def map_qos_obj_to_dict(qos_obj):
+        ''' Take a QOS object and return a dict, normalizing the key names
+ Interestingly, the APIs are using different ids for create and get
+ '''
+ mappings = [
+ ('burst_iops', 'burstIOPS'),
+ ('min_iops', 'minIOPS'),
+ ('max_iops', 'maxIOPS'),
+ ]
+ qos_dict = vars(qos_obj)
+ # Align names to create API and module interface
+ for read, send in mappings:
+ if read in qos_dict:
+ qos_dict[send] = qos_dict.pop(read)
+ return qos_dict
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ :description: Get QOS Policy object for a given name
+ :return: object, error
+ Policy object converted to dict if found, else None
+ Error text if error, else None
+ :rtype: dict/None, str/None
+ """
+ try:
+ qos_policy_list_obj = self.elem_connect.list_qos_policies()
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ error = "Error getting list of qos policies: %s" % to_native(exc)
+ return None, error
+
+ policy_dict = dict()
+ if hasattr(qos_policy_list_obj, 'qos_policies'):
+ for policy in qos_policy_list_obj.qos_policies:
+ # Check and get policy object for a given name
+ if str(policy.qos_policy_id) == name:
+ policy_dict = vars(policy)
+ elif policy.name == name:
+ policy_dict = vars(policy)
+ if 'qos' in policy_dict:
+ policy_dict['qos'] = self.map_qos_obj_to_dict(policy_dict['qos'])
+
+ return policy_dict if policy_dict else None, None
+
+ def account_exists(self, account):
+ """
+ Return account_id if account exists for given account id or name
+ Raises an exception if account does not exist
+
+ :param account: Account ID or Name
+ :type account: str
+ :return: Account ID if found, None if not found
+ """
+ # If account is an integer, get_by_id
+ if account.isdigit():
+ account_id = int(account)
+ try:
+ result = self.elem_connect.get_account_by_id(account_id=account_id)
+ if result.account.account_id == account_id:
+ return account_id
+ except solidfire.common.ApiServerError:
+ # don't fail, continue and try get_by_name
+ pass
+ # get account by name, the method returns an Exception if account doesn't exist
+ result = self.elem_connect.get_account_by_name(username=account)
+ return result.account.account_id
+
+ def set_element_attributes(self, source):
+ """
+ Return telemetry attributes for the current execution
+
+ :param source: name of the module
+ :type source: str
+ :return: a dict containing telemetry attributes
+ """
+ attributes = {}
+ attributes['config-mgmt'] = 'ansible'
+ attributes['event-source'] = source
+ return attributes
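A brief, hedged sketch of how the modules later in this patch use these helpers; the names and ids below are illustrative only:

```python
# 'sfe' is the ElementFactory connection returned by netapp.create_sf_connection().
from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule


def example_usage(sfe):
    helper = NaElementSWModule(sfe)

    # Telemetry attributes attached to objects created by a module.
    attributes = helper.set_element_attributes(source='na_elementsw_volume')

    # volume_exists() accepts a name or a numeric id and returns the numeric volume id, or None.
    volume_id = helper.volume_exists('vol7', 1)

    # account_exists() returns the numeric account id; the underlying SDK call
    # raises an exception if the account name does not exist.
    account_id = helper.account_exists('TenantA')

    return attributes, volume_id, account_id
```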
diff --git a/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py
new file mode 100644
index 000000000..c2b02d3d2
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/module_utils/netapp_module.py
@@ -0,0 +1,225 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def cmp(a, b):
+ """
+ Python 3 does not have a cmp function, this will do the cmp.
+ :param a: first object to check
+ :param b: second object to check
+    :return: negative, zero, or positive, following Python 2's cmp(); -1 if a is None
+ """
+ # convert to lower case for string comparison.
+ if a is None:
+ return -1
+ if type(a) is str and type(b) is str:
+ a = a.lower()
+ b = b.lower()
+ # if list has string element, convert string to lower case.
+ if type(a) is list and type(b) is list:
+ a = [x.lower() if type(x) is str else x for x in a]
+ b = [x.lower() if type(x) is str else x for x in b]
+ a.sort()
+ b.sort()
+ return (a > b) - (a < b)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = list()
+ self.changed = False
+        self.parameters = {'name': 'not initialized'}
+ # self.debug = list()
+
+ def set_parameters(self, ansible_params):
+ self.parameters = dict()
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def get_cd_action(self, current, desired):
+        ''' takes a current state (an object, or None if absent) and a desired
+            state (the module parameters), and returns an action:
+            create, delete, None
+            eg:
+            some_object = self.get_object(source)
+            action = self.get_cd_action(current=some_object, desired=self.parameters)
+ '''
+ if 'state' in desired:
+ desired_state = desired['state']
+ else:
+ desired_state = 'present'
+
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
+ updated_values = dict()
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ @staticmethod
+ def check_keys(current, desired):
+ ''' TODO: raise an error if keys do not match
+ with the exception of:
+ new_name, state in desired
+ '''
+ pass
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+        ''' compares two lists and returns a list of elements that are either the desired elements or elements that are
+ modified from the current state depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+        desired_diff_list = [item for item in desired if item not in current]  # get what is in desired and not in current
+        current_diff_list = [item for item in current if item not in desired]  # get what is in current but not in desired
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ if get_list_diff:
+ return desired_diff_list
+ else:
+ return desired
+ else:
+ return []
+
+ def get_modified_attributes(self, current, desired, get_list_diff=False, additional_keys=False):
+        ''' takes two dicts of attributes and returns a dict of attributes that are
+ not in the current state
+ It is expected that all attributes of interest are listed in current and
+ desired.
+        The same assumption holds true for any nested dictionary.
+        TODO: This is actually not true for the ElementSW 'attributes' dictionary.
+ Practically it means you cannot add or remove a key in a modify.
+ :param: current: current attributes in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: dict of attributes to be modified
+ :rtype: dict
+
+ NOTE: depending on the attribute, the caller may need to do a modify or a
+ different operation (eg move volume if the modified attribute is an
+ aggregate name)
+ '''
+ # uncomment these 2 lines if needed
+ # self.log.append('current: %s' % repr(current))
+ # self.log.append('desired: %s' % repr(desired))
+ # if the object does not exist, we can't modify it
+ modified = dict()
+ if current is None:
+ return modified
+
+ # error out if keys do not match
+ self.check_keys(current, desired)
+
+ # collect changed attributes
+ for key, value in current.items():
+ if key in desired and desired[key] is not None:
+ if type(value) is list:
+ modified_list = self.compare_lists(value, desired[key], get_list_diff) # get modified list from current and desired
+ if modified_list:
+ modified[key] = modified_list
+ elif type(value) is dict:
+ modified_dict = self.get_modified_attributes(value, desired[key], get_list_diff, additional_keys=True)
+ if modified_dict:
+ modified[key] = modified_dict
+ elif cmp(value, desired[key]) != 0:
+ modified[key] = desired[key]
+ if additional_keys:
+ for key, value in desired.items():
+ if key not in current:
+ modified[key] = desired[key]
+ if modified:
+ self.changed = True
+ # Uncomment this line if needed
+ # self.log.append('modified: %s' % repr(modified))
+ return modified
+
+ def is_rename_action(self, source, target):
+ ''' takes a source and target object, and returns True
+ if a rename is required
+ eg:
+ source = self.get_object(source_name)
+ target = self.get_object(target_name)
+ action = is_rename_action(source, target)
+ :return: None for error, True for rename action, False otherwise
+ '''
+ if source is None and target is None:
+ # error, do nothing
+            # cannot rename a non-existent resource
+ # alternatively we could create B
+ return None
+ if source is not None and target is not None:
+ # error, do nothing
+ # idempotency (or) new_name_is_already_in_use
+ # alternatively we could delete B and rename A to B
+ return False
+ if source is None and target is not None:
+ # do nothing, maybe the rename was already done
+ return False
+ # source is not None and target is None:
+ # rename is in order
+ self.changed = True
+ return True
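A minimal, hedged sketch of the create/delete/modify decision flow these helpers support; the `current` dict would normally come from an API lookup, and the attribute names are illustrative only:

```python
from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule

na_helper = NetAppModule()
# set_parameters() drops options left at None.
desired = na_helper.set_parameters({'state': 'present', 'name': 'vol1', 'size': 20, 'attributes': None})
current = {'name': 'vol1', 'size': 10}     # None if the object does not exist

cd_action = na_helper.get_cd_action(current, desired)   # 'create', 'delete', or None
modify = None
if cd_action is None and current is not None:
    # Scalar strings are compared case-insensitively via cmp(); list values are
    # compared by membership via compare_lists().
    modify = na_helper.get_modified_attributes(current, desired)    # {'size': 20}

changed = na_helper.changed    # set by the helpers whenever an action or modification is detected
```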
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py
new file mode 100644
index 000000000..467ca415c
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group.py
@@ -0,0 +1,397 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Access Group Manager
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_access_group
+
+short_description: NetApp Element Software Manage Access Groups
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update access groups on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified access group should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ from_name:
+ description:
+ - ID or Name of the access group to rename.
+ - Required to create a new access group called 'name' by renaming 'from_name'.
+ version_added: 2.8.0
+ type: str
+
+ name:
+ description:
+ - Name for the access group for create, modify and delete operations.
+ required: True
+ aliases:
+ - src_access_group_id
+ type: str
+
+ initiators:
+ description:
+ - List of initiators to include in the access group. If unspecified, the access group will start out without configured initiators.
+ type: list
+ elements: str
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+ - It accepts either volume_name or volume_id
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+    - It accepts either account_name or account_id.
+    - If account_id is numeric, it is treated as an account_id.
+    - Otherwise, it is treated as an account_name.
+ version_added: 2.8.0
+ type: str
+
+ virtual_network_id:
+ description:
+ - The ID of the Element SW Software Cluster Virtual Network to associate the access group with.
+ type: int
+
+ virtual_network_tags:
+ description:
+ - The tags of VLAN Virtual Network Tag to associate the access group with.
+ type: list
+ elements: str
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+ type: dict
+
+'''
+
+EXAMPLES = """
+ - name: Create Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleAccessGroup
+ volumes: [7,8]
+ account_id: 1
+
+ - name: Modify Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleAccessGroup-Renamed
+ account_id: 1
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Rename Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ from_name: AnsibleAccessGroup
+ name: AnsibleAccessGroup-Renamed
+
+ - name: Delete Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: 1
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWAccessGroup(object):
+ """
+ Element Software Volume Access Group
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ from_name=dict(required=False, type='str'),
+ name=dict(required=True, aliases=["src_access_group_id"], type='str'),
+ initiators=dict(required=False, type='list', elements='str'),
+ volumes=dict(required=False, type='list', elements='str'),
+ account_id=dict(required=False, type='str'),
+ virtual_network_id=dict(required=False, type='int'),
+ virtual_network_tags=dict(required=False, type='list', elements='str'),
+ attributes=dict(required=False, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['account_id'])
+ ],
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ # Set up state variables
+ self.state = input_params['state']
+ self.from_name = input_params['from_name']
+ self.access_group_name = input_params['name']
+ self.initiators = input_params['initiators']
+ self.volumes = input_params['volumes']
+ self.account_id = input_params['account_id']
+ self.virtual_network_id = input_params['virtual_network_id']
+ self.virtual_network_tags = input_params['virtual_network_tags']
+ self.attributes = input_params['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group')
+
+ def get_access_group(self, name):
+ """
+ Get Access Group
+ :description: Get Access Group object for a given name
+
+ :return: object (Group object)
+ :rtype: object (Group object)
+ """
+ access_groups_list = self.sfe.list_volume_access_groups()
+ group_obj = None
+
+ for group in access_groups_list.volume_access_groups:
+ # Check and get access_group object for a given name
+ if str(group.volume_access_group_id) == name:
+ group_obj = group
+ elif group.name == name:
+ group_obj = group
+
+ return group_obj
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except solidfire.common.ApiServerError:
+ return None
+
+ def get_volume_ids(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume, self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def create_access_group(self):
+ """
+ Create the Access Group
+ """
+ try:
+ self.sfe.create_volume_access_group(name=self.access_group_name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_access_group(self):
+ """
+ Delete the Access Group
+ """
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.access_group_name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_access_group(self):
+ """
+ Update the Access Group if the access_group already exists
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def rename_access_group(self):
+ """
+ Rename the Access Group to the new name
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.from_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.access_group_name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.from_name, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the access group operation on the Element Software Cluster
+ """
+ changed = False
+ action = None
+
+ input_account_id = self.account_id
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
+ if self.state == 'present' and self.volumes is not None:
+ if self.account_id:
+ self.volumes = self.get_volume_ids()
+ else:
+ self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id))
+
+ group_detail = self.get_access_group(self.access_group_name)
+
+ if group_detail is not None:
+ # If access group found
+ self.group_id = group_detail.volume_access_group_id
+
+ if self.state == "absent":
+ action = 'delete'
+ changed = True
+ else:
+                # If state is present, check whether any parameter of the existing group needs modification.
+ if self.volumes is not None and len(self.volumes) > 0:
+ # Compare the volume list
+ if not group_detail.volumes:
+ # If access group does not have any volume attached
+ action = 'update'
+ changed = True
+ else:
+ for volumeID in group_detail.volumes:
+ if volumeID not in self.volumes:
+ action = 'update'
+ changed = True
+ break
+
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ action = 'update'
+ changed = True
+
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None:
+ action = 'update'
+ changed = True
+
+ else:
+ # access_group does not exist
+ if self.state == "present" and self.from_name is not None:
+ group_detail = self.get_access_group(self.from_name)
+ if group_detail is not None:
+                    # If the resource pointed to by from_name exists, rename the access_group to name
+ self.from_group_id = group_detail.volume_access_group_id
+ action = 'rename'
+ changed = True
+ else:
+                    # If the resource pointed to by from_name does not exist, error out
+ self.module.fail_json(msg="Resource does not exist : %s" % self.from_name)
+ elif self.state == "present":
+ # If from_name is not defined, Create from scratch.
+ action = 'create'
+ changed = True
+
+ if changed and not self.module.check_mode:
+ if action == 'create':
+ self.create_access_group()
+ elif action == 'rename':
+ self.rename_access_group()
+ elif action == 'update':
+ self.update_access_group()
+ elif action == 'delete':
+ self.delete_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_access_group = ElementSWAccessGroup()
+ na_elementsw_access_group.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py
new file mode 100644
index 000000000..af9053a13
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_access_group_volumes.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Access Group Volumes
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_access_group_volumes
+
+short_description: NetApp Element Software Add/Remove Volumes to/from Access Group
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.1.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Add or remove volumes to/from access group on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volumes should exist or not for this access group.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ access_group:
+ description:
+ - Name or id for the access group to add volumes to, or remove volumes from
+ required: true
+ type: str
+
+ volumes:
+ description:
+ - List of volumes to add/remove from/to the access group.
+ - It accepts either volume_name or volume_id
+ required: True
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ - It accepts either account_name or account_id
+    - If account_id is numeric, it is looked up as an account_id first, then as an account_name.
+    - If account_id is not numeric, it is looked up as an account_name.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Add Volumes to Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ access_group: AnsibleAccessGroup
+ volumes: ['vol7','vol8','vol9']
+ account_id: '1'
+
+ - name: Remove Volumes from Access Group
+ na_elementsw_access_group:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ access_group: AnsibleAccessGroup
+ volumes: ['vol7','vol9']
+ account_id: '1'
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWAccessGroupVolumes(object):
+ """
+ Element Software Access Group Volumes
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ access_group=dict(required=True, type='str'),
+ volumes=dict(required=True, type='list', elements='str'),
+ account_id=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ # Set up state variables
+ self.state = input_params['state']
+ self.access_group_name = input_params['access_group']
+ self.volumes = input_params['volumes']
+ self.account_id = input_params['account_id']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_access_group')
+
+ def get_access_group(self, name):
+ """
+ Get Access Group
+ :description: Get Access Group object for a given name
+
+ :return: object (Group object)
+ :rtype: object (Group object)
+ """
+ access_groups_list = self.sfe.list_volume_access_groups()
+ group_obj = None
+
+ for group in access_groups_list.volume_access_groups:
+ # Check and get access_group object for a given name
+ if str(group.volume_access_group_id) == name:
+ group_obj = group
+ elif group.name == name:
+ group_obj = group
+
+ return group_obj
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except solidfire.common.ApiServerError:
+ return None
+
+ def get_volume_ids(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume, self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Error: Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def update_access_group(self, volumes):
+ """
+ Update the Access Group if the access_group already exists
+ """
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.group_id,
+ volumes=volumes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.access_group_name, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the volume add/remove operations for the access group on the Element Software Cluster
+ """
+ changed = False
+ input_account_id = self.account_id
+
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
+ if self.account_id is None:
+ self.module.fail_json(msg='Error: Specified account id "%s" does not exist.' % str(input_account_id))
+
+ # get volume data
+ self.volume_ids = self.get_volume_ids()
+ group_detail = self.get_access_group(self.access_group_name)
+ if group_detail is None:
+ self.module.fail_json(msg='Error: Specified access group "%s" does not exist for account id: %s.' % (self.access_group_name, str(input_account_id)))
+ self.group_id = group_detail.volume_access_group_id
+ volumes = group_detail.volumes
+
+ # compare expected list of volumes to existing one
+ if self.state == "absent":
+ # remove volumes if present in access group
+ volumes = [vol for vol in group_detail.volumes if vol not in self.volume_ids]
+ else:
+ # add volumes if not already present
+ volumes = [vol for vol in self.volume_ids if vol not in group_detail.volumes]
+ volumes.extend(group_detail.volumes)
+
+ # update if there is a change
+ if len(volumes) != len(group_detail.volumes):
+ if not self.module.check_mode:
+ self.update_access_group(volumes)
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_access_group_volumes = ElementSWAccessGroupVolumes()
+ na_elementsw_access_group_volumes.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py
new file mode 100644
index 000000000..862753747
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_account.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Account Manager
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_account
+
+short_description: NetApp Element Software Manage Accounts
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update accounts on Element SW
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ element_username:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+ aliases:
+ - account_id
+ type: str
+
+ from_name:
+ description:
+ - ID or Name of the account to rename.
+ - Required to create an account called 'element_username' by renaming 'from_name'.
+ version_added: 2.8.0
+ type: str
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+ type: str
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+ type: str
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+ type: dict
+
+ status:
+ description:
+ - Status of the account.
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ element_username: TenantA
+
+- name: Modify Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ status: locked
+ element_username: TenantA
+
+- name: Rename Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ element_username: TenantA_Renamed
+ from_name: TenantA
+
+- name: Rename and modify Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ status: locked
+ element_username: TenantA_Renamed
+ from_name: TenantA
+
+- name: Delete Account
+ na_elementsw_account:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ element_username: TenantA_Renamed
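+
+# A hedged, illustrative example: attributes is a free-form dict of name/value pairs;
+# the key and value shown here are hypothetical.
+- name: Create Account with custom attributes
+  na_elementsw_account:
+    hostname: "{{ elementsw_hostname }}"
+    username: "{{ elementsw_username }}"
+    password: "{{ elementsw_password }}"
+    state: present
+    element_username: TenantB
+    attributes:
+      cost_center: "12345"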
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWAccount(object):
+ """
+ Element SW Account
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ element_username=dict(required=True, aliases=["account_id"], type='str'),
+ from_name=dict(required=False, type='str', default=None),
+ initiator_secret=dict(required=False, type='str', no_log=True),
+ target_secret=dict(required=False, type='str', no_log=True),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ params = self.module.params
+
+ # set up state variables
+ self.state = params.get('state')
+ self.element_username = params.get('element_username')
+ self.from_name = params.get('from_name')
+ self.initiator_secret = params.get('initiator_secret')
+ self.target_secret = params.get('target_secret')
+ self.attributes = params.get('attributes')
+ self.status = params.get('status')
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the Element SW Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_account'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_account')
+
+ def get_account(self, username):
+ """
+ Get Account
+ :description: Get Account object from account id or name
+
+ :return: Details about the account. None if not found.
+ :rtype: object (Account object)
+ """
+
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ # Check and get account object for a given name
+ if str(account.account_id) == username:
+ return account
+ elif account.username == username:
+ return account
+ return None
+
+ def create_account(self):
+ """
+ Create the Account
+ """
+ try:
+ self.sfe.add_account(username=self.element_username,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg='Error creating account %s: %s' % (self.element_username, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ """
+ Delete the Account
+ """
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_account(self):
+ """
+ Rename the Account
+ """
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.element_username,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error renaming account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ """
+ Update the Account if account already exists
+ """
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the account operation on the Element OS Cluster
+ """
+ changed = False
+ update_account = False
+ account_detail = self.get_account(self.element_username)
+
+ if account_detail is None and self.state == 'present':
+ changed = True
+
+ elif account_detail is not None:
+ # If account found
+ self.account_id = account_detail.account_id
+
+ if self.state == 'absent':
+ changed = True
+ else:
+ # If state is present, check whether any parameter of the existing account needs modification.
+ if account_detail.username is not None and self.element_username is not None and \
+ account_detail.username != self.element_username:
+ update_account = True
+ changed = True
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ # Skipping the changes
+ pass
+ else:
+ if self.state == 'present':
+ if update_account:
+ self.update_account()
+ else:
+ if self.from_name is not None:
+ # If from_name is defined
+ account_exists = self.get_account(self.from_name)
+ if account_exists is not None:
+ # If the account referenced by from_name exists, rename it to element_username
+ self.account_id = account_exists.account_id
+ self.rename_account()
+ else:
+ # If the account referenced by from_name does not exist, error out
+ self.module.fail_json(msg="Resource does not exist: %s" % self.from_name)
+ else:
+ # If from_name is not defined, create from scratch.
+ self.create_account()
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_account = ElementSWAccount()
+ na_elementsw_account.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py
new file mode 100644
index 000000000..7ad46648a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_admin_users.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_admin_users
+
+short_description: NetApp Element Software Manage Admin Users
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update admin users on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ element_username:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+ type: str
+
+ element_password:
+ description:
+ - The password for the new admin account. Setting the password attribute will always reset your password, even if the password is the same.
+ type: str
+
+ acceptEula:
+ description:
+ - Boolean, true to accept the EULA, false otherwise.
+ type: bool
+
+ access:
+ description:
+ - A list of types the admin has access to
+ type: list
+ elements: str
+'''
+
+EXAMPLES = """
+ - name: Add admin user
+ na_elementsw_admin_users:
+ state: present
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
+ element_password: carchi8py
+ acceptEula: True
+ access: accounts,drives
+
+ - name: modify admin user
+ na_elementsw_admin_users:
+ state: present
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
+ element_password: carchi8py12
+ acceptEula: True
+ access: accounts,drives,nodes
+
+ - name: delete admin user
+ na_elementsw_admin_users:
+ state: absent
+ username: "{{ admin_user_name }}"
+ password: "{{ admin_password }}"
+ hostname: "{{ hostname }}"
+ element_username: carchi8py
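+
+  # A hedged, illustrative variant: the access option also accepts a YAML list,
+  # equivalent to the comma-separated form used above.
+  - name: Add admin user with list-style access
+    na_elementsw_admin_users:
+      state: present
+      username: "{{ admin_user_name }}"
+      password: "{{ admin_password }}"
+      hostname: "{{ hostname }}"
+      element_username: carchi8py
+      element_password: carchi8py
+      acceptEula: True
+      access:
+        - accounts
+        - drives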
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class NetAppElementSWAdminUser(object):
+ """
+ Class to set, modify and delete admin users on ElementSW box
+ """
+
+ def __init__(self):
+ """
+ Initialize the NetAppElementSWAdminUser class.
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ element_username=dict(required=True, type='str'),
+ element_password=dict(required=False, type='str', no_log=True),
+ acceptEula=dict(required=False, type='bool'),
+ access=dict(required=False, type='list', elements='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+ # set up state variables
+ self.state = param['state']
+ self.element_username = param['element_username']
+ self.element_password = param['element_password']
+ self.acceptEula = param['acceptEula']
+ self.access = param['access']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_admin_users')
+
+ def does_admin_user_exist(self):
+ """
+ Checks to see if an admin user exists or not
+ :return: True if the user exists, False if it does not
+ """
+ admins_list = self.sfe.list_cluster_admins()
+ for admin in admins_list.cluster_admins:
+ if admin.username == self.element_username:
+ return True
+ return False
+
+ def get_admin_user(self):
+ """
+ Get the admin user object
+ :return: the admin user object
+ """
+ admins_list = self.sfe.list_cluster_admins()
+ for admin in admins_list.cluster_admins:
+ if admin.username == self.element_username:
+ return admin
+ return None
+
+ def modify_admin_user(self):
+ """
+ Modify an admin user. If a password is set, the user will be modified, as there is no way to
+ compare a new password with an existing one.
+ :return: if a user was modified or not
+ """
+ changed = False
+ admin_user = self.get_admin_user()
+ if self.access is not None and len(self.access) > 0:
+ for access in self.access:
+ if access not in admin_user.access:
+ changed = True
+ if changed and not self.module.check_mode:
+ self.sfe.modify_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id,
+ access=self.access,
+ password=self.element_password,
+ attributes=self.attributes)
+
+ return changed
+
+ def add_admin_user(self):
+ """
+ Adds a new admin user to the element cluster
+ :return: nothing
+ """
+ self.sfe.add_cluster_admin(username=self.element_username,
+ password=self.element_password,
+ access=self.access,
+ accept_eula=self.acceptEula,
+ attributes=self.attributes)
+
+ def delete_admin_user(self):
+ """
+ Deletes an existing admin user from the element cluster
+ :return: nothing
+ """
+ admin_user = self.get_admin_user()
+ self.sfe.remove_cluster_admin(cluster_admin_id=admin_user.cluster_admin_id)
+
+ def apply(self):
+ """
+ determines which method to call to set, delete or modify admin users
+ :return:
+ """
+ changed = False
+ if self.state == "present":
+ if self.does_admin_user_exist():
+ changed = self.modify_admin_user()
+ else:
+ if not self.module.check_mode:
+ self.add_admin_user()
+ changed = True
+ else:
+ if self.does_admin_user_exist():
+ if not self.module.check_mode:
+ self.delete_admin_user()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppElementSWAdminUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py
new file mode 100644
index 000000000..e81e7c5ea
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_backup.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Backup Manager
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_elementsw_backup
+
+short_description: NetApp Element Software Create Backups
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create backup
+
+options:
+
+ src_volume_id:
+ description:
+ - ID of the backup source volume.
+ required: true
+ aliases:
+ - volume_id
+ type: str
+
+ dest_hostname:
+ description:
+ - hostname for the backup destination cluster
+ - will be set equal to hostname if not specified
+ required: false
+ type: str
+
+ dest_username:
+ description:
+ - username for the backup destination cluster
+ - will be set equal to username if not specified
+ required: false
+ type: str
+
+ dest_password:
+ description:
+ - password for the backup destination cluster
+ - will be set equal to password if not specified
+ required: false
+ type: str
+
+ dest_volume_id:
+ description:
+ - ID of the backup destination volume
+ required: true
+ type: str
+
+ format:
+ description:
+ - Backup format to use
+ choices: ['native','uncompressed']
+ required: false
+ default: 'native'
+ type: str
+
+ script:
+ description:
+ - the backup script to be executed
+ required: false
+ type: str
+
+ script_parameters:
+ description:
+ - the backup script parameters
+ required: false
+ type: dict
+
+'''
+
+EXAMPLES = """
+na_elementsw_backup:
+ hostname: "{{ source_cluster_hostname }}"
+ username: "{{ source_cluster_username }}"
+ password: "{{ source_cluster_password }}"
+ src_volume_id: 1
+ dest_hostname: "{{ destination_cluster_hostname }}"
+ dest_username: "{{ destination_cluster_username }}"
+ dest_password: "{{ destination_cluster_password }}"
+ dest_volume_id: 3
+ format: native
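+
+# A hedged, illustrative variant: when dest_hostname, dest_username and dest_password
+# are omitted they default to hostname, username and password, so the backup is written
+# to another volume on the same cluster. The volume IDs below are hypothetical.
+na_elementsw_backup:
+  hostname: "{{ source_cluster_hostname }}"
+  username: "{{ source_cluster_username }}"
+  password: "{{ source_cluster_password }}"
+  src_volume_id: 1
+  dest_volume_id: 2
+  format: native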
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+import time
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWBackup(object):
+ ''' class to handle backup operations '''
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and SolidFire connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+
+ src_volume_id=dict(aliases=['volume_id'], required=True, type='str'),
+ dest_hostname=dict(required=False, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True),
+ dest_volume_id=dict(required=True, type='str'),
+ format=dict(required=False, choices=['native', 'uncompressed'], default='native'),
+ script=dict(required=False, type='str'),
+ script_parameters=dict(required=False, type='dict')
+
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_together=[['script', 'script_parameters']],
+ supports_check_mode=True
+ )
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+ # If destination cluster details are not specified, set the destination to be the same as the source
+ if self.module.params["dest_hostname"] is None:
+ self.module.params["dest_hostname"] = self.module.params["hostname"]
+ if self.module.params["dest_username"] is None:
+ self.module.params["dest_username"] = self.module.params["username"]
+ if self.module.params["dest_password"] is None:
+ self.module.params["dest_password"] = self.module.params["password"]
+
+ params = self.module.params
+
+ # establish a connection to both source and destination elementsw clusters
+ self.src_connection = netapp_utils.create_sf_connection(self.module)
+ self.module.params["username"] = params["dest_username"]
+ self.module.params["password"] = params["dest_password"]
+ self.module.params["hostname"] = params["dest_hostname"]
+ self.dest_connection = netapp_utils.create_sf_connection(self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.src_connection)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_backup')
+
+ def apply(self):
+ """
+ Apply backup creation logic
+ """
+ self.create_backup()
+ self.module.exit_json(changed=True)
+
+ def create_backup(self):
+ """
+ Create backup
+ """
+
+ # Start volume write on destination cluster
+
+ try:
+ write_obj = self.dest_connection.start_bulk_volume_write(volume_id=self.module.params["dest_volume_id"],
+ format=self.module.params["format"],
+ attributes=self.attributes)
+ write_key = write_obj.key
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error starting bulk write on destination cluster", exception=to_native(err))
+
+ # Set script parameters if not passed by user
+ # These parameters are equivalent to the options used when a backup is executed via the GUI
+
+ if self.module.params["script"] is None and self.module.params["script_parameters"] is None:
+
+ self.module.params["script"] = 'bv_internal.py'
+ self.module.params["script_parameters"] = {"write": {
+ "mvip": self.module.params["dest_hostname"],
+ "username": self.module.params["dest_username"],
+ "password": self.module.params["dest_password"],
+ "key": write_key,
+ "endpoint": "solidfire",
+ "format": self.module.params["format"]},
+ "range": {"lba": 0, "blocks": 244224}}
+
+ # Start volume read on source cluster
+
+ try:
+ read_obj = self.src_connection.start_bulk_volume_read(self.module.params["src_volume_id"],
+ self.module.params["format"],
+ script=self.module.params["script"],
+ script_parameters=self.module.params["script_parameters"],
+ attributes=self.attributes)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error starting bulk read on source cluster", exception=to_native(err))
+
+ # Poll job status until it has completed
+ # SF will automatically time out the job if it does not complete within a certain amount of time
+
+ completed = False
+ while completed is not True:
+ # Sleep between polling iterations to reduce api load
+ time.sleep(2)
+ try:
+ result = self.src_connection.get_async_result(read_obj.async_handle, True)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Unable to check backup job status", exception=to_native(err))
+
+ if result["status"] != 'running':
+ completed = True
+ if 'error' in result:
+ self.module.fail_json(msg=result['error']['message'])
+
+
+def main():
+ """ Run backup operation"""
+ vol_obj = ElementSWBackup()
+ vol_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py
new file mode 100644
index 000000000..2f288dc3a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_check_connections.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_check_connections
+
+short_description: NetApp Element Software Check connectivity to MVIP and SVIP
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+ type: str
+
+ mvip:
+ description:
+ - Optionally, use to test connection of a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+ type: str
+
+ svip:
+ description:
+ - Optionally, use to test connection of a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+ type: str
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ na_elementsw_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
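+
+  # A hedged, illustrative example: skip one of the two checks. When skip is set to
+  # svip, mvip must be provided (and vice versa); the IP address below is hypothetical.
+  - name: Check connection to a specific MVIP only
+    na_elementsw_check_connections:
+      hostname: "{{ solidfire_hostname }}"
+      username: "{{ solidfire_username }}"
+      password: "{{ solidfire_password }}"
+      skip: svip
+      mvip: 10.117.10.5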
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class NaElementSWConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('skip', 'svip', ['mvip']),
+ ('skip', 'mvip', ['svip'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.module.params.copy()
+ self.msg = ""
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(self.module, port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.elem.test_connect_mvip(mvip=self.parameters['mvip'])
+ # Todo - Log details about the test
+ return test.details.connected
+
+ except Exception as e:
+ self.msg += 'Error checking connection to MVIP: %s' % to_native(e)
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.elem.test_connect_svip(svip=self.parameters['svip'])
+ # Todo - Log details about the test
+ return test.details.connected
+ except Exception as e:
+ self.msg += 'Error checking connection to SVIP: %s' % to_native(e)
+ return False
+
+ def apply(self):
+ passed = False
+ if self.parameters.get('skip') is None:
+ # check MVIP first; both MVIP and SVIP checks must pass
+ passed = self.check_mvip_connection()
+ # check if both connections have passed
+ passed &= self.check_svip_connection()
+ elif self.parameters['skip'] == 'mvip':
+ passed |= self.check_svip_connection()
+ elif self.parameters['skip'] == 'svip':
+ passed |= self.check_mvip_connection()
+ if not passed:
+ self.module.fail_json(msg=self.msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ connect_obj = NaElementSWConnection()
+ connect_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py
new file mode 100644
index 000000000..ede60cae3
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Initialize Cluster
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster
+
+short_description: NetApp Element Software Create Cluster
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Initialize Element Software node ownership to form a cluster.
+ - If the cluster does not exist, username/password are still required but ignored for initial creation.
+ - username/password are used as the node credentials to see if the cluster already exists.
+ - username/password can also be used to set the cluster credentials.
+ - If the cluster already exists, no error is returned, but changed is set to false.
+ - Cluster modifications are not supported and are ignored.
+
+options:
+ management_virtual_ip:
+ description:
+ - Floating (virtual) IP address for the cluster on the management network.
+ required: true
+ type: str
+
+ storage_virtual_ip:
+ description:
+ - Floating (virtual) IP address for the cluster on the storage (iSCSI) network.
+ required: true
+ type: str
+
+ replica_count:
+ description:
+ - Number of replicas of each piece of data to store in the cluster.
+ default: 2
+ type: int
+
+ cluster_admin_username:
+ description:
+ - Username for the cluster admin.
+ - If not provided, defaults to username.
+ type: str
+
+ cluster_admin_password:
+ description:
+ - Initial password for the cluster admin account.
+ - If not provided, defaults to password.
+ type: str
+
+ accept_eula:
+ description:
+ - Required to indicate your acceptance of the End User License Agreement when creating this cluster.
+ - To accept the EULA, set this parameter to true.
+ type: bool
+
+ nodes:
+ description:
+ - Storage IP (SIP) addresses of the initial set of nodes making up the cluster.
+ - Node IP addresses must be provided in this list.
+ required: true
+ type: list
+ elements: str
+
+ attributes:
+ description:
+ - List of name-value pairs in JSON object format.
+ type: dict
+
+ timeout:
+ description:
+ - Time to wait for cluster creation to complete.
+ default: 100
+ type: int
+ version_added: 20.8.0
+
+ fail_if_cluster_already_exists_with_larger_ensemble:
+ description:
+ - If the cluster exists, the default is to verify that I(nodes) is a superset of the existing ensemble.
+ - A superset is accepted because some nodes may have a different role.
+ - But the module reports an error if the existing ensemble contains a node not listed in I(nodes).
+ - This check is disabled when this option is set to false.
+ default: true
+ type: bool
+ version_added: 20.8.0
+
+ encryption:
+ description: Enable or disable encryption at rest.
+ type: bool
+ version_added: 20.10.0
+
+ order_number:
+ description: (experimental) order number as provided by NetApp
+ type: str
+ version_added: 20.10.0
+
+ serial_number:
+ description: (experimental) serial number as provided by NetApp
+ type: str
+ version_added: 20.10.0
+'''
+
+EXAMPLES = """
+
+ - name: Initialize new cluster
+ tags:
+ - elementsw_cluster
+ na_elementsw_cluster:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ management_virtual_ip: 10.226.108.32
+ storage_virtual_ip: 10.226.109.68
+ replica_count: 2
+ accept_eula: true
+ nodes:
+ - 10.226.109.72
+ - 10.226.109.74
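+
+  # A hedged, illustrative variant: also request software encryption at rest and keep
+  # the default ensemble check. The encryption option may require an Element release
+  # that supports software encryption at rest; IPs are the same placeholders as above.
+  - name: Initialize new cluster with encryption at rest
+    na_elementsw_cluster:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      management_virtual_ip: 10.226.108.32
+      storage_virtual_ip: 10.226.109.68
+      replica_count: 2
+      accept_eula: true
+      encryption: true
+      fail_if_cluster_already_exists_with_larger_ensemble: true
+      nodes:
+        - 10.226.109.72
+        - 10.226.109.74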
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWCluster(object):
+ """
+ Element Software Initialize node with ownership for cluster formation
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ management_virtual_ip=dict(required=True, type='str'),
+ storage_virtual_ip=dict(required=True, type='str'),
+ replica_count=dict(required=False, type='int', default=2),
+ cluster_admin_username=dict(required=False, type='str'),
+ cluster_admin_password=dict(required=False, type='str', no_log=True),
+ accept_eula=dict(required=False, type='bool'),
+ nodes=dict(required=True, type='list', elements='str'),
+ attributes=dict(required=False, type='dict', default=None),
+ timeout=dict(required=False, type='int', default=100),
+ fail_if_cluster_already_exists_with_larger_ensemble=dict(required=False, type='bool', default=True),
+ encryption=dict(required=False, type='bool'),
+ order_number=dict(required=False, type='str'),
+ serial_number=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.management_virtual_ip = input_params['management_virtual_ip']
+ self.storage_virtual_ip = input_params['storage_virtual_ip']
+ self.replica_count = input_params['replica_count']
+ self.accept_eula = input_params.get('accept_eula')
+ self.attributes = input_params.get('attributes')
+ self.nodes = input_params['nodes']
+ self.cluster_admin_username = input_params['username'] if input_params.get('cluster_admin_username') is None else input_params['cluster_admin_username']
+ self.cluster_admin_password = input_params['password'] if input_params.get('cluster_admin_password') is None else input_params['cluster_admin_password']
+ self.fail_if_cluster_already_exists_with_larger_ensemble = input_params['fail_if_cluster_already_exists_with_larger_ensemble']
+ self.encryption = input_params['encryption']
+ self.order_number = input_params['order_number']
+ self.serial_number = input_params['serial_number']
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+ # 442 for node APIs, 443 (default) for cluster APIs
+ for role, port in [('node', 442), ('cluster', 443)]:
+ try:
+ # even though username/password should be optional, create_sf_connection fails if not set
+ conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port, timeout=input_params['timeout'])
+ if role == 'node':
+ self.sfe_node = conn
+ else:
+ self.sfe_cluster = conn
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+ msg = 'Most likely the cluster is already created.'
+ msg += ' Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
+ msg += ' Even though credentials are not required for the first create, they are needed to check whether the cluster already exists.'
+ msg += ' Cluster reported: %s' % repr(exc)
+ else:
+ msg = 'Failed to create connection: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect: %s' % repr(exc))
+
+ self.elementsw_helper = NaElementSWModule(self.sfe_cluster)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_cluster')
+
+ def get_node_cluster_info(self):
+ """
+ Get Cluster Info - using node API
+ """
+ try:
+ info = self.sfe_node.get_config()
+ self.debug.append(repr(info.config.cluster))
+ return info.config.cluster
+ except Exception as exc:
+ self.debug.append("port: %s, %s" % (str(self.sfe_node._port), repr(exc)))
+ return None
+
+ def check_cluster_exists(self):
+ """
+ validate if cluster exists with list of nodes
+ error out if something is found but with different nodes
+ return True if a cluster is found, False otherwise
+ """
+ info = self.get_node_cluster_info()
+ if info is None:
+ return False
+ ensemble = getattr(info, 'ensemble', None)
+ if not ensemble:
+ return False
+ # format is 'id:IP'
+ nodes = [x.split(':', 1)[1] for x in ensemble]
+ current_ensemble_nodes = set(nodes) if ensemble else set()
+ requested_nodes = set(self.nodes) if self.nodes else set()
+ extra_ensemble_nodes = current_ensemble_nodes - requested_nodes
+ # TODO: the cluster may have more nodes than what is reported in ensemble:
+ # nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
+ # So it's OK to find some missing nodes, but not very deterministic.
+ # eg some kind of backup nodes could be in nodes_not_in_ensemble.
+ if extra_ensemble_nodes and self.fail_if_cluster_already_exists_with_larger_ensemble:
+ msg = 'Error: found existing cluster with more nodes in ensemble. Cluster: %s, extra nodes: %s' %\
+ (getattr(info, 'cluster', 'not found'), extra_ensemble_nodes)
+ msg += '. Cluster info: %s' % repr(info)
+ self.module.fail_json(msg=msg)
+ if extra_ensemble_nodes:
+ self.debug.append("Extra ensemble nodes: %s" % extra_ensemble_nodes)
+ nodes_not_in_ensemble = requested_nodes - current_ensemble_nodes
+ if nodes_not_in_ensemble:
+ self.debug.append("Extra requested nodes not in ensemble: %s" % nodes_not_in_ensemble)
+ return True
+
+ def create_cluster_api(self, options):
+ ''' Call send_request directly rather than using the SDK if new fields are present
+ The new SDK will support these in version 1.17 (Nov or Feb)
+ '''
+ extra_options = ['enableSoftwareEncryptionAtRest', 'orderNumber', 'serialNumber']
+ if not any((item in options for item in extra_options)):
+ # use SDK
+ return self.sfe_cluster.create_cluster(**options)
+
+ # call directly the API as the SDK is not updated yet
+ params = {
+ "mvip": options['mvip'],
+ "svip": options['svip'],
+ "repCount": options['rep_count'],
+ "username": options['username'],
+ "password": options['password'],
+ "nodes": options['nodes'],
+ }
+ if options['accept_eula'] is not None:
+ params["acceptEula"] = options['accept_eula']
+ if options['attributes'] is not None:
+ params["attributes"] = options['attributes']
+ for option in extra_options:
+ if options.get(option):
+ params[option] = options[option]
+
+ # There is no adaptor.
+ return self.sfe_cluster.send_request(
+ 'CreateCluster',
+ netapp_utils.solidfire.CreateClusterResult,
+ params,
+ since=None
+ )
+
+ def create_cluster(self):
+ """
+ Create Cluster
+ """
+ options = {
+ 'mvip': self.management_virtual_ip,
+ 'svip': self.storage_virtual_ip,
+ 'rep_count': self.replica_count,
+ 'accept_eula': self.accept_eula,
+ 'nodes': self.nodes,
+ 'attributes': self.attributes,
+ 'username': self.cluster_admin_username,
+ 'password': self.cluster_admin_password
+ }
+ if self.encryption is not None:
+ options['enableSoftwareEncryptionAtRest'] = self.encryption
+ if self.order_number is not None:
+ options['orderNumber'] = self.order_number
+ if self.serial_number is not None:
+ options['serialNumber'] = self.serial_number
+
+ return_msg = 'created'
+ try:
+ # does not work as node even though documentation says otherwise
+ # running as node, this error is reported: 500 xUnknownAPIMethod method=CreateCluster
+ self.create_cluster_api(options)
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ # not sure how this can happen, but the cluster may already exist
+ if 'xClusterAlreadyCreated' not in str(exc.message):
+ self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
+ return_msg = 'already_exists: %s' % str(exc.message)
+ except Exception as exc:
+ self.module.fail_json(msg='Error creating cluster %s' % to_native(exc), exception=traceback.format_exc())
+ return return_msg
+
+ def apply(self):
+ """
+ Check connection and initialize node with cluster ownership
+ """
+ changed = False
+ result_message = None
+ exists = self.check_cluster_exists()
+ if exists:
+ result_message = "cluster already exists"
+ else:
+ changed = True
+ if not self.module.check_mode:
+ result_message = self.create_cluster()
+ if result_message.startswith('already_exists:'):
+ changed = False
+ self.module.exit_json(changed=changed, msg=result_message, debug=self.debug)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster = ElementSWCluster()
+ na_elementsw_cluster.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py
new file mode 100644
index 000000000..94b5c17dc
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_config.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Configure cluster
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_config
+
+short_description: Configure Element SW Cluster
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure Element Software cluster.
+
+options:
+ modify_cluster_full_threshold:
+ description:
+ - The capacity level at which the cluster generates an event.
+ - Requires at least one of stage3_block_threshold_percent, max_metadata_over_provision_factor, or stage2_aware_threshold.
+ suboptions:
+ stage3_block_threshold_percent:
+ description:
+ - The percentage below the "Error" threshold that triggers a cluster "Warning" alert
+ type: int
+ max_metadata_over_provision_factor:
+ description:
+ - The number of times metadata space can be overprovisioned relative to the amount of space available
+ type: int
+ stage2_aware_threshold:
+ description:
+ - The number of nodes of capacity remaining in the cluster before the system triggers a notification
+ type: int
+ type: dict
+
+ encryption_at_rest:
+ description:
+ - enable or disable the Advanced Encryption Standard (AES) 256-bit encryption at rest on the cluster
+ choices: ['present', 'absent']
+ type: str
+
+ set_ntp_info:
+ description:
+ - configure NTP on cluster nodes
+ - Requires a list of one or more ntp_servers
+ suboptions:
+ ntp_servers:
+ description:
+ - list of NTP servers to add to each node's NTP configuration
+ type: list
+ elements: str
+ broadcastclient:
+ type: bool
+ default: False
+ description:
+ - Enables every node in the cluster as a broadcast client
+ type: dict
+
+ enable_virtual_volumes:
+ type: bool
+ default: True
+ description:
+ - Enable the NetApp SolidFire VVols cluster feature
+'''
+
+EXAMPLES = """
+
+ - name: Configure cluster
+ tags:
+ - elementsw_cluster_config
+ na_elementsw_cluster_config:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ modify_cluster_full_threshold:
+ stage2_aware_threshold: 2
+ stage3_block_threshold_percent: 10
+ max_metadata_over_provision_factor: 2
+ encryption_at_rest: absent
+ set_ntp_info:
+ broadcastclient: False
+ ntp_servers:
+ - 1.1.1.1
+ - 2.2.2.2
+ enable_virtual_volumes: True
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWClusterConfig(object):
+ """
+ Element Software Configure Element SW Cluster
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ modify_cluster_full_threshold=dict(
+ type='dict',
+ options=dict(
+ stage2_aware_threshold=dict(type='int', default=None),
+ stage3_block_threshold_percent=dict(type='int', default=None),
+ max_metadata_over_provision_factor=dict(type='int', default=None)
+ )
+ ),
+ encryption_at_rest=dict(type='str', choices=['present', 'absent']),
+ set_ntp_info=dict(
+ type='dict',
+ options=dict(
+ broadcastclient=dict(type='bool', default=False),
+ ntp_servers=dict(type='list', elements='str')
+ )
+ ),
+ enable_virtual_volumes=dict(type='bool', default=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_ntp_details(self):
+ """
+ get ntp info
+ """
+ # Get ntp details
+ ntp_details = self.sfe.get_ntp_info()
+ return ntp_details
+
+ def cmp(self, provided_ntp_servers, existing_ntp_servers):
+ # As Python 3 doesn't have a default cmp function, define it manually to provide the same functionality.
+ return (provided_ntp_servers > existing_ntp_servers) - (provided_ntp_servers < existing_ntp_servers)
+
+ def get_cluster_details(self):
+ """
+ get cluster info
+ """
+ cluster_details = self.sfe.get_cluster_info()
+ return cluster_details
+
+ def get_vvols_status(self):
+ """
+ get vvols status
+ """
+ feature_status = self.sfe.get_feature_status(feature='vvols')
+ if feature_status is not None:
+ return feature_status.features[0].enabled
+ return None
+
+ def get_cluster_full_threshold_status(self):
+ """
+ get cluster full threshold
+ """
+ cluster_full_threshold_status = self.sfe.get_cluster_full_threshold()
+ return cluster_full_threshold_status
+
+ def setup_ntp_info(self, servers, broadcastclient=None):
+ """
+ configure ntp
+ """
+ # Set ntp servers
+ try:
+ self.sfe.set_ntp_info(servers, broadcastclient)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error configuring ntp %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_encryption_at_rest(self, state=None):
+ """
+ enable/disable encryption at rest
+ """
+ try:
+ if state == 'present':
+ encryption_state = 'enable'
+ self.sfe.enable_encryption_at_rest()
+ elif state == 'absent':
+ encryption_state = 'disable'
+ self.sfe.disable_encryption_at_rest()
+ except Exception as exception_object:
+ self.module.fail_json(msg='Failed to %s rest encryption %s' % (encryption_state,
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def enable_feature(self, feature):
+ """
+ enable feature
+ """
+ try:
+ self.sfe.enable_feature(feature=feature)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error enabling %s %s' % (feature, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_cluster_full_threshold(self, stage2_aware_threshold=None,
+ stage3_block_threshold_percent=None,
+ max_metadata_over_provision_factor=None):
+ """
+ modify cluster full threshold
+ """
+ try:
+ self.sfe.modify_cluster_full_threshold(stage2_aware_threshold=stage2_aware_threshold,
+ stage3_block_threshold_percent=stage3_block_threshold_percent,
+ max_metadata_over_provision_factor=max_metadata_over_provision_factor)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Failed to modify cluster full threshold %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Cluster configuration
+ """
+ changed = False
+ result_message = None
+
+ if self.parameters.get('modify_cluster_full_threshold') is not None:
+ # get cluster full threshold
+ cluster_full_threshold_details = self.get_cluster_full_threshold_status()
+ # maxMetadataOverProvisionFactor
+ current_mmopf = cluster_full_threshold_details.max_metadata_over_provision_factor
+ # stage3BlockThresholdPercent
+ current_s3btp = cluster_full_threshold_details.stage3_block_threshold_percent
+ # stage2AwareThreshold
+ current_s2at = cluster_full_threshold_details.stage2_aware_threshold
+
+ # is cluster full threshold state change required?
+ if self.parameters.get("modify_cluster_full_threshold")['max_metadata_over_provision_factor'] is not None and \
+ current_mmopf != self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor'] or \
+ self.parameters.get("modify_cluster_full_threshold")['stage3_block_threshold_percent'] is not None and \
+ current_s3btp != self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'] or \
+ self.parameters.get("modify_cluster_full_threshold")['stage2_aware_threshold'] is not None and \
+ current_s2at != self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold']:
+ changed = True
+ self.set_cluster_full_threshold(self.parameters['modify_cluster_full_threshold']['stage2_aware_threshold'],
+ self.parameters['modify_cluster_full_threshold']['stage3_block_threshold_percent'],
+ self.parameters['modify_cluster_full_threshold']['max_metadata_over_provision_factor'])
+
+ if self.parameters.get('encryption_at_rest') is not None:
+ # get all cluster info
+ cluster_info = self.get_cluster_details()
+ # register rest state
+ current_encryption_at_rest_state = cluster_info.cluster_info.encryption_at_rest_state
+
+ # is encryption state change required?
+ if current_encryption_at_rest_state == 'disabled' and self.parameters['encryption_at_rest'] == 'present' or \
+ current_encryption_at_rest_state == 'enabled' and self.parameters['encryption_at_rest'] == 'absent':
+ changed = True
+ self.set_encryption_at_rest(self.parameters['encryption_at_rest'])
+
+ if self.parameters.get('set_ntp_info') is not None:
+ # get all ntp details
+ ntp_details = self.get_ntp_details()
+ # register list of ntp servers
+ ntp_servers = ntp_details.servers
+ # broadcastclient
+ broadcast_client = ntp_details.broadcastclient
+
+ # has either the broadcastclient or the ntp server list changed?
+
+ if self.parameters.get('set_ntp_info')['broadcastclient'] != broadcast_client or \
+ self.cmp(self.parameters.get('set_ntp_info')['ntp_servers'], ntp_servers) != 0:
+ changed = True
+ self.setup_ntp_info(self.parameters.get('set_ntp_info')['ntp_servers'],
+ self.parameters.get('set_ntp_info')['broadcastclient'])
+
+ if self.parameters.get('enable_virtual_volumes') is not None:
+ # check vvols status
+ current_vvols_status = self.get_vvols_status()
+
+ # has the vvols state changed?
+ if current_vvols_status is False and self.parameters.get('enable_virtual_volumes') is True:
+ changed = True
+ self.enable_feature('vvols')
+ elif current_vvols_status is True and self.parameters.get('enable_virtual_volumes') is not True:
+ # vvols, once enabled, cannot be disabled
+ self.module.fail_json(msg='Error disabling vvols: this feature cannot be undone')
+
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster_config = ElementSWClusterConfig()
+ na_elementsw_cluster_config.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py
new file mode 100644
index 000000000..af064e214
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_pair
+
+short_description: NetApp Element Software Manage Cluster Pair
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete cluster pair
+
+options:
+
+ state:
+ description:
+ - Whether the specified cluster pair should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ dest_mvip:
+ description:
+ - Destination IP address of the cluster to be paired.
+ required: true
+ type: str
+
+ dest_username:
+ description:
+ - Destination username for the cluster to be paired.
+ - Optional if this is the same as the source cluster username.
+ type: str
+
+ dest_password:
+ description:
+ - Destination password for the cluster to be paired.
+ - Optional if this is the same as the source cluster password.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create cluster pair
+ na_elementsw_cluster_pair:
+ hostname: "{{ src_hostname }}"
+ username: "{{ src_username }}"
+ password: "{{ src_password }}"
+ state: present
+ dest_mvip: "{{ dest_hostname }}"
+
+ - name: Delete cluster pair
+ na_elementsw_cluster_pair:
+ hostname: "{{ src_hostname }}"
+ username: "{{ src_username }}"
+ password: "{{ src_password }}"
+ state: absent
+ dest_mvip: "{{ dest_hostname }}"
+ dest_username: "{{ dest_username }}"
+ dest_password: "{{ dest_password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWClusterPair(object):
+ """ class to handle cluster pairing operations """
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and ElementSW connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ dest_mvip=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # get element_sw_connection for destination cluster
+ # overwrite existing source host, user and password with destination credentials
+ self.module.params['hostname'] = self.parameters['dest_mvip']
+        # username and password are the same as the source's,
+        # unless dest_username and dest_password are specified
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_elem = netapp_utils.create_sf_connection(module=self.module)
+ self.dest_elementsw_helper = NaElementSWModule(self.dest_elem)
+
+ def check_if_already_paired(self, paired_clusters, hostname):
+ for pair in paired_clusters.cluster_pairs:
+ if pair.mvip == hostname:
+ return pair.cluster_pair_id
+ return None
+
+ def get_src_pair_id(self):
+ """
+ Check for idempotency
+ """
+ # src cluster and dest cluster exist
+ paired_clusters = self.elem.list_cluster_pairs()
+ return self.check_if_already_paired(paired_clusters, self.parameters['dest_mvip'])
+
+ def get_dest_pair_id(self):
+ """
+ Getting destination cluster_pair_id
+ """
+ paired_clusters = self.dest_elem.list_cluster_pairs()
+ return self.check_if_already_paired(paired_clusters, self.parameters['hostname'])
+
+ def pair_clusters(self):
+ """
+ Start cluster pairing on source, and complete on target cluster
+ """
+ try:
+ pair_key = self.elem.start_cluster_pairing()
+ self.dest_elem.complete_cluster_pairing(
+ cluster_pairing_key=pair_key.cluster_pairing_key)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error pairing cluster %s and %s"
+ % (self.parameters['hostname'],
+ self.parameters['dest_mvip']),
+ exception=to_native(err))
+
+ def unpair_clusters(self, pair_id_source, pair_id_dest):
+ """
+ Delete cluster pair
+ """
+ try:
+ self.elem.remove_cluster_pair(cluster_pair_id=pair_id_source)
+ self.dest_elem.remove_cluster_pair(cluster_pair_id=pair_id_dest)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error unpairing cluster %s and %s"
+ % (self.parameters['hostname'],
+ self.parameters['dest_mvip']),
+ exception=to_native(err))
+
+ def apply(self):
+ """
+ Call create / delete cluster pair methods
+ """
+ pair_id_source = self.get_src_pair_id()
+ # If already paired, find the cluster_pair_id of destination cluster
+ if pair_id_source:
+ pair_id_dest = self.get_dest_pair_id()
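+        # note: pair_id_dest is only needed for a delete, and get_cd_action only returns 'delete'
+        # when pair_id_source exists, so the variable is always defined before unpair_clusters runs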
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(pair_id_source, self.parameters)
+ if cd_action == "create":
+ self.pair_clusters()
+ elif cd_action == "delete":
+ self.unpair_clusters(pair_id_source, pair_id_dest)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply cluster pair actions """
+ cluster_obj = ElementSWClusterPair()
+ cluster_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py
new file mode 100644
index 000000000..847700197
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_cluster_snmp.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# (c) 2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Configure SNMP
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_cluster_snmp
+
+short_description: Configure Element SW Cluster SNMP
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Configure Element Software cluster SNMP.
+
+options:
+
+ state:
+ description:
+        - Whether SNMP should be configured (present) or disabled (absent) on the cluster nodes.
+        - When you enable SNMP, the action applies to all nodes in the cluster, and the values that are passed
+          replace, in whole, all values set in any previous call to this module.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ snmp_v3_enabled:
+ description:
+        - Whether SNMP v3 should be enabled (true) or SNMP v2 should be used (false).
+ type: bool
+
+ networks:
+ description:
+ - List of networks and what type of access they have to the SNMP servers running on the cluster nodes.
+ - This parameter is required if SNMP v3 is disabled.
+ suboptions:
+ access:
+ description:
+ - ro for read-only access.
+ - rw for read-write access.
+ - rosys for read-only access to a restricted set of system information.
+ choices: ['ro', 'rw', 'rosys']
+ type: str
+ cidr:
+ description:
+ - A CIDR network mask. This network mask must be an integer greater than or equal to 0, \
+ and less than or equal to 32. It must also not be equal to 31.
+ type: int
+ community:
+ description:
+ - SNMP community string.
+ type: str
+ network:
+ description:
+ - This parameter along with the cidr variable is used to control which network the access and \
+ community string apply to.
+ - The special value of 'default' is used to specify an entry that applies to all networks.
+ - The cidr mask is ignored when network value is either a host name or default.
+ type: str
+ type: dict
+
+ usm_users:
+ description:
+ - List of users and the type of access they have to the SNMP servers running on the cluster nodes.
+ - This parameter is required if SNMP v3 is enabled.
+ suboptions:
+ access:
+ description:
+ - rouser for read-only access.
+ - rwuser for read-write access.
+ - rosys for read-only access to a restricted set of system information.
+ choices: ['rouser', 'rwuser', 'rosys']
+ type: str
+ name:
+ description:
+ - The name of the user. Must contain at least one character, but no more than 32 characters.
+ - Blank spaces are not allowed.
+ type: str
+ password:
+ description:
+ - The password of the user. Must be between 8 and 255 characters long (inclusive).
+ - Blank spaces are not allowed.
+ - Required if 'secLevel' is 'auth' or 'priv.'
+ type: str
+ passphrase:
+ description:
+ - The passphrase of the user. Must be between 8 and 255 characters long (inclusive).
+ - Blank spaces are not allowed.
+ - Required if 'secLevel' is 'priv.'
+ type: str
+ secLevel:
+ description:
+ - To define the security level of a user.
+ choices: ['noauth', 'auth', 'priv']
+ type: str
+ type: dict
+
+'''
+
+EXAMPLES = """
+
+ - name: configure SnmpNetwork
+ tags:
+ - elementsw_cluster_snmp
+ na_elementsw_cluster_snmp:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ snmp_v3_enabled: True
+ usm_users:
+ access: rouser
+ name: testuser
+ password: ChangeMe123
+ passphrase: ChangeMe123
+ secLevel: auth
+ networks:
+ access: ro
+ cidr: 24
+ community: TestNetwork
+ network: 192.168.0.1
+
+ - name: Disable SnmpNetwork
+ tags:
+ - elementsw_cluster_snmp
+ na_elementsw_cluster_snmp:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
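+
+  # A hedged sketch (not part of the original examples): configure SNMP v2 only.
+  # The community string and network values below are illustrative assumptions.
+  - name: configure SnmpNetwork with SNMP v2
+    na_elementsw_cluster_snmp:
+      hostname: "{{ elementsw_hostname }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      state: present
+      snmp_v3_enabled: false
+      networks:
+        access: ro
+        cidr: 24
+        community: TestCommunity
+        network: 192.168.0.0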
+
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWClusterSnmp(object):
+ """
+ Element Software Configure Element SW Cluster SnmpNetwork
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ snmp_v3_enabled=dict(type='bool'),
+ networks=dict(
+ type='dict',
+ options=dict(
+ access=dict(type='str', choices=['ro', 'rw', 'rosys']),
+ cidr=dict(type='int', default=None),
+ community=dict(type='str', default=None),
+ network=dict(type='str', default=None)
+ )
+ ),
+ usm_users=dict(
+ type='dict',
+ options=dict(
+ access=dict(type='str', choices=['rouser', 'rwuser', 'rosys']),
+ name=dict(type='str', default=None),
+ password=dict(type='str', default=None, no_log=True),
+ passphrase=dict(type='str', default=None, no_log=True),
+ secLevel=dict(type='str', choices=['auth', 'noauth', 'priv'])
+ )
+ ),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
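+            # required_if: snmp_v3_enabled must be supplied when state is present; usm_users is
+            # required when v3 is enabled, and networks when it is disabled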
+ required_if=[
+ ('state', 'present', ['snmp_v3_enabled']),
+ ('snmp_v3_enabled', True, ['usm_users']),
+ ('snmp_v3_enabled', False, ['networks'])
+ ],
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ if self.parameters.get('state') == "present":
+ if self.parameters.get('usm_users') is not None:
+ # Getting the configuration details to configure SNMP Version3
+ self.access_usm = self.parameters.get('usm_users')['access']
+ self.name = self.parameters.get('usm_users')['name']
+ self.password = self.parameters.get('usm_users')['password']
+ self.passphrase = self.parameters.get('usm_users')['passphrase']
+ self.secLevel = self.parameters.get('usm_users')['secLevel']
+ if self.parameters.get('networks') is not None:
+ # Getting the configuration details to configure SNMP Version2
+ self.access_network = self.parameters.get('networks')['access']
+ self.cidr = self.parameters.get('networks')['cidr']
+ self.community = self.parameters.get('networks')['community']
+ self.network = self.parameters.get('networks')['network']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def enable_snmp(self):
+ """
+ enable snmp feature
+ """
+ try:
+ self.sfe.enable_snmp(snmp_v3_enabled=self.parameters.get('snmp_v3_enabled'))
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error enabling snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def disable_snmp(self):
+ """
+ disable snmp feature
+ """
+ try:
+ self.sfe.disable_snmp()
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error disabling snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def configure_snmp(self, actual_networks, actual_usm_users):
+ """
+ Configure snmp
+ """
+ try:
+ self.sfe.set_snmp_acl(networks=[actual_networks], usm_users=[actual_usm_users])
+
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error Configuring snmp feature %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Cluster SNMP configuration
+ """
+ changed = False
+ result_message = None
+ update_required = False
+ version_change = False
+ is_snmp_enabled = self.sfe.get_snmp_state().enabled
+
+ if is_snmp_enabled is True:
+ # IF SNMP is already enabled
+ if self.parameters.get('state') == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.parameters.get('state') == 'present':
+ # Checking if SNMP configuration needs to be updated,
+ is_snmp_v3_enabled = self.sfe.get_snmp_state().snmp_v3_enabled
+
+ if is_snmp_v3_enabled != self.parameters.get('snmp_v3_enabled'):
+                    # Checking if there are any version changes required
+ version_change = True
+ changed = True
+
+ if is_snmp_v3_enabled is True:
+ # Checking If snmp configuration for usm_users needs modification
+ if len(self.sfe.get_snmp_info().usm_users) == 0:
+ # If snmp is getting configured for first time
+ update_required = True
+ changed = True
+ else:
+ for usm_user in self.sfe.get_snmp_info().usm_users:
+ if usm_user.access != self.access_usm or usm_user.name != self.name or usm_user.password != self.password or \
+ usm_user.passphrase != self.passphrase or usm_user.sec_level != self.secLevel:
+ update_required = True
+ changed = True
+ else:
+ # Checking If snmp configuration for networks needs modification
+ for snmp_network in self.sfe.get_snmp_info().networks:
+ if snmp_network.access != self.access_network or snmp_network.cidr != self.cidr or \
+ snmp_network.community != self.community or snmp_network.network != self.network:
+ update_required = True
+ changed = True
+
+ else:
+ if self.parameters.get('state') == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+
+ else:
+ if self.parameters.get('state') == "present":
+ # IF snmp is not enabled, then enable and configure snmp
+ if self.parameters.get('snmp_v3_enabled') is True:
+ # IF SNMP is enabled with version 3
+ usm_users = {'access': self.access_usm,
+ 'name': self.name,
+ 'password': self.password,
+ 'passphrase': self.passphrase,
+ 'secLevel': self.secLevel}
+ networks = None
+ else:
+ # IF SNMP is enabled with version 2
+ usm_users = None
+ networks = {'access': self.access_network,
+ 'cidr': self.cidr,
+ 'community': self.community,
+ 'network': self.network}
+
+ if is_snmp_enabled is False or version_change is True:
+ # Enable and configure snmp
+ self.enable_snmp()
+ self.configure_snmp(networks, usm_users)
+ result_message = "SNMP is enabled and configured"
+
+ elif update_required is True:
+ # If snmp is already enabled, update the configuration if required
+ self.configure_snmp(networks, usm_users)
+ result_message = "SNMP is configured"
+
+ elif is_snmp_enabled is True and self.parameters.get('state') == "absent":
+ # If snmp is enabled and state is absent, disable snmp
+ self.disable_snmp()
+ result_message = "SNMP is disabled"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_cluster_snmp = ElementSWClusterSnmp()
+ na_elementsw_cluster_snmp.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py
new file mode 100644
index 000000000..f0fd7e38b
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_drive.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Drives
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_drive
+
+short_description: NetApp Element Software Manage Node Drives
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Add, Erase or Remove drive for nodes on Element Software Cluster.
+
+options:
+ drive_ids:
+ description:
+ - List of Drive IDs or Serial Names of Node drives.
+        - If not specified, add and remove actions are performed on all drives of node_id.
+ type: list
+ elements: str
+ aliases: ['drive_id']
+
+ state:
+ description:
+ - Element SW Storage Drive operation state.
+ - present - To add drive of node to participate in cluster data storage.
+ - absent - To remove the drive from being part of active cluster.
+ - clean - Clean-up any residual data persistent on a *removed* drive in a secured method.
+ choices: ['present', 'absent', 'clean']
+ default: 'present'
+ type: str
+
+ node_ids:
+ description:
+ - List of IDs or Names of cluster nodes.
+ - If node_ids and drive_ids are not specified, all available drives in the cluster are added if state is present.
+ - If node_ids and drive_ids are not specified, all active drives in the cluster are removed if state is absent.
+ required: false
+ type: list
+ elements: str
+ aliases: ['node_id']
+
+ force_during_upgrade:
+ description:
+ - Flag to force drive operation during upgrade.
+ - Not supported with latest version of SolidFire SDK (1.7.0.152)
+ type: 'bool'
+
+ force_during_bin_sync:
+ description:
+ - Flag to force during a bin sync operation.
+ - Not supported with latest version of SolidFire SDK (1.7.0.152)
+ type: 'bool'
+'''
+
+EXAMPLES = """
+ - name: Add drive with status available to cluster
+ tags:
+ - elementsw_add_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J3221807
+ force_during_upgrade: false
+ force_during_bin_sync: false
+ node_ids: sf4805-meg-03
+
+ - name: Remove active drive from cluster
+ tags:
+ - elementsw_remove_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ force_during_upgrade: false
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J321208
+
+ - name: Secure Erase drive
+ tags:
+ - elemensw_clean_drive
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: clean
+ drive_ids: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J432109
+ node_ids: sf4805-meg-03
+
+ - name: Add all the drives of all nodes to cluster
+ tags:
+ - elementsw_add_node
+ na_elementsw_drive:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ force_during_upgrade: false
+ force_during_bin_sync: false
+
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWDrive(object):
+ """
+ Element Software Storage Drive operations
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent', 'clean'], default='present'),
+ drive_ids=dict(required=False, type='list', elements='str', aliases=['drive_id']),
+ node_ids=dict(required=False, type='list', elements='str', aliases=['node_id']),
+ force_during_upgrade=dict(required=False, type='bool'),
+ force_during_bin_sync=dict(required=False, type='bool')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.drive_ids = input_params['drive_ids']
+ self.node_ids = input_params['node_ids']
+ self.force_during_upgrade = input_params['force_during_upgrade']
+ self.force_during_bin_sync = input_params['force_during_bin_sync']
+ self.list_nodes = None
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ # increase timeout, as removing a disk takes some time
+ self.sfe = netapp_utils.create_sf_connection(module=self.module, timeout=120)
+
+ def get_node_id(self, node_id):
+ """
+ Get Node ID
+ :description: Find and retrieve node_id from the active cluster
+
+        :return: node_id; the module fails if the node cannot be found
+ :rtype: node_id
+ """
+ if self.list_nodes is None:
+ self.list_nodes = self.sfe.list_active_nodes()
+ for current_node in self.list_nodes.nodes:
+ if node_id == str(current_node.node_id):
+ return current_node.node_id
+ elif node_id == current_node.name:
+ return current_node.node_id
+ self.module.fail_json(msg='unable to find node for node_id=%s' % node_id)
+
+ def get_drives_listby_status(self, node_num_ids):
+ """
+ Capture list of drives based on status for a given node_id
+ :description: Capture list of active, failed and available drives from a given node_id
+
+ :return: None
+ """
+ self.active_drives = dict()
+ self.available_drives = dict()
+ self.other_drives = dict()
+ self.all_drives = self.sfe.list_drives()
+
+ for drive in self.all_drives.drives:
+ # get all drives if no node is given, or match the node_ids
+ if node_num_ids is None or drive.node_id in node_num_ids:
+ if drive.status in ['active', 'failed']:
+ self.active_drives[drive.serial] = drive.drive_id
+ elif drive.status == "available":
+ self.available_drives[drive.serial] = drive.drive_id
+ else:
+ self.other_drives[drive.serial] = (drive.drive_id, drive.status)
+
+ self.debug.append('available: %s' % self.available_drives)
+ self.debug.append('active: %s' % self.active_drives)
+ self.debug.append('other: %s' % self.other_drives)
+
+ def get_drive_id(self, drive_id, node_num_ids):
+ """
+ Get Drive ID
+ :description: Find and retrieve drive_id from the active cluster
+ Assumes self.all_drives is already populated
+
+        :return: (drive_id, status) tuple; the module fails if the drive cannot be found
+        :rtype: tuple
+ """
+ for drive in self.all_drives.drives:
+ if drive_id == str(drive.drive_id):
+ break
+ if drive_id == drive.serial:
+ break
+ else:
+ self.module.fail_json(msg='unable to find drive for drive_id=%s. Debug=%s' % (drive_id, self.debug))
+ if node_num_ids and drive.node_id not in node_num_ids:
+ self.module.fail_json(msg='drive for drive_id=%s belongs to another node, with node_id=%d. Debug=%s' % (drive_id, drive.node_id, self.debug))
+ return drive.drive_id, drive.status
+
+ def get_active_drives(self, drives):
+ """
+ return a list of active drives
+ if drives is specified, only [] or a subset of disks in drives are returned
+        else all active drives for this node or cluster are returned
+ """
+ if drives is None:
+ return list(self.active_drives.values())
+ return [drive_id for drive_id, status in drives if status in ['active', 'failed']]
+
+ def get_available_drives(self, drives, action):
+ """
+ return a list of available drives (not active)
+ if drives is specified, only [] or a subset of disks in drives are returned
+ else all available drives for this node or cluster are returned
+ """
+ if drives is None:
+ return list(self.available_drives.values())
+ action_list = list()
+ for drive_id, drive_status in drives:
+ if drive_status == 'available':
+ action_list.append(drive_id)
+ elif drive_status in ['active', 'failed']:
+ # already added
+ pass
+ elif drive_status == 'erasing' and action == 'erase':
+ # already erasing
+ pass
+ elif drive_status == 'removing':
+ self.module.fail_json(msg='Error - cannot %s drive while it is being removed. Debug: %s' % (action, self.debug))
+ elif drive_status == 'erasing' and action == 'add':
+ self.module.fail_json(msg='Error - cannot %s drive while it is being erased. Debug: %s' % (action, self.debug))
+ else:
+ self.module.fail_json(msg='Error - cannot %s drive while it is in %s state. Debug: %s' % (action, drive_status, self.debug))
+ return action_list
+
+ def add_drive(self, drives=None):
+ """
+ Add Drive available for Cluster storage expansion
+ """
+ kwargs = dict()
+ if self.force_during_upgrade is not None:
+ kwargs['force_during_upgrade'] = self.force_during_upgrade
+ if self.force_during_bin_sync is not None:
+ kwargs['force_during_bin_sync'] = self.force_during_bin_sync
+ try:
+ self.sfe.add_drives(drives, **kwargs)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error adding drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def remove_drive(self, drives=None):
+ """
+ Remove Drive active in Cluster
+ """
+ kwargs = dict()
+ if self.force_during_upgrade is not None:
+ kwargs['force_during_upgrade'] = self.force_during_upgrade
+ try:
+ self.sfe.remove_drives(drives, **kwargs)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error removing drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def secure_erase(self, drives=None):
+ """
+ Secure Erase any residual data existing on a drive
+ """
+ try:
+ self.sfe.secure_erase_drives(drives)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error cleaning data from drive%s: %s: %s' %
+ ('s' if len(drives) > 1 else '',
+ str(drives),
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate Drive operation
+ """
+ changed = False
+
+ action_list = []
+ node_num_ids = None
+ drives = None
+ if self.node_ids:
+ node_num_ids = [self.get_node_id(node_id) for node_id in self.node_ids]
+
+ self.get_drives_listby_status(node_num_ids)
+ if self.drive_ids:
+ drives = [self.get_drive_id(drive_id, node_num_ids) for drive_id in self.drive_ids]
+
+ if self.state == "present":
+ action_list = self.get_available_drives(drives, 'add')
+ elif self.state == "absent":
+ action_list = self.get_active_drives(drives)
+ elif self.state == "clean":
+ action_list = self.get_available_drives(drives, 'erase')
+
+ if len(action_list) > 0:
+ changed = True
+ if not self.module.check_mode and changed:
+ if self.state == "present":
+ self.add_drive(action_list)
+ elif self.state == "absent":
+ self.remove_drive(action_list)
+ elif self.state == "clean":
+ self.secure_erase(action_list)
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_drive = ElementSWDrive()
+ na_elementsw_drive.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py
new file mode 100644
index 000000000..fde928784
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_info.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Info
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_info
+short_description: NetApp Element Software Info
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.10.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Collect cluster and node information.
+  - Use an MVIP as hostname for cluster and node scope.
+  - Use an MIP as hostname for node scope.
+ - When using MIPs, cluster APIs are expected to fail with 'xUnknownAPIMethod method=ListAccounts'
+
+options:
+ gather_subsets:
+ description:
+ - list of subsets to gather from target cluster or node
+ - supported values
+ - node_config, cluster_accounts, cluster_nodes, cluster_drives.
+ - additional values
+ - all - for all subsets,
+ - all_clusters - all subsets at cluster scope,
+ - all_nodes - all subsets at node scope
+ type: list
+ elements: str
+ default: ['all']
+ aliases: ['gather_subset']
+
+ filter:
+ description:
+ - When a list of records is returned, this can be used to limit the records to be returned.
+ - If more than one key is used, all keys must match.
+ type: dict
+
+ fail_on_error:
+ description:
+      - By default, errors are not fatal when collecting a subset. The subset will show an error in the info output.
+ - if set to True, the module fails on the first error.
+ type: bool
+ default: false
+
+ fail_on_key_not_found:
+ description:
+ - force an error when filter is used and a key is not present in records.
+ type: bool
+ default: true
+
+ fail_on_record_not_found:
+ description:
+ - force an error when filter is used and no record is matched.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = """
+
+ - name: get all available subsets
+ na_elementsw_info:
+ hostname: "{{ elementsw_mvip }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ gather_subsets: all
+ register: result
+
+ - name: collect data for elementsw accounts using a filter
+ na_elementsw_info:
+ hostname: "{{ elementsw_mvip }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ gather_subsets: 'cluster_accounts'
+ filter:
+ username: "{{ username_to_find }}"
+ register: result
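+
+  # A hedged sketch (not part of the original examples): collect node-scope data using a node MIP.
+  # The elementsw_mip variable name is an assumption for illustration.
+  - name: collect node configuration using a MIP
+    na_elementsw_info:
+      hostname: "{{ elementsw_mip }}"
+      username: "{{ elementsw_username }}"
+      password: "{{ elementsw_password }}"
+      gather_subsets: all_nodes
+    register: result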
+"""
+
+RETURN = """
+
+info:
+ description:
+ - a dictionary of collected subsets
+        - each subset is in JSON format
+ returned: success
+ type: dict
+
+debug:
+ description:
+ - a list of detailed error messages if some subsets cannot be collected
+ returned: success
+ type: list
+
+"""
+from ansible.module_utils.basic import AnsibleModule
+
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWInfo(object):
+ '''
+    Element Software Info - collect cluster and node information
+ '''
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ gather_subsets=dict(type='list', elements='str', aliases=['gather_subset'], default='all'),
+ filter=dict(type='dict'),
+ fail_on_error=dict(type='bool', default=False),
+ fail_on_key_not_found=dict(type='bool', default=True),
+ fail_on_record_not_found=dict(type='bool', default=False),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+
+ # 442 for node APIs, 443 (default) for cluster APIs
+ for role, port in [('node', 442), ('cluster', 443)]:
+ try:
+ conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port)
+ if role == 'node':
+ self.sfe_node = conn
+ else:
+ self.sfe_cluster = conn
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+                    msg = 'Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
+                    msg += ' %s reported: %s' % ('Node' if port == 442 else 'Cluster', repr(exc))
+ else:
+ msg = 'Failed to create connection for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc))
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc)))
+
+ # TODO: add new node methods here
+ self.node_methods = dict(
+ node_config=self.sfe_node.get_config,
+ )
+ # TODO: add new cluster methods here
+ self.cluster_methods = dict(
+ cluster_accounts=self.sfe_cluster.list_accounts,
+ cluster_drives=self.sfe_cluster.list_drives,
+ cluster_nodes=self.sfe_cluster.list_all_nodes
+ )
+ self.methods = dict(self.node_methods)
+ self.methods.update(self.cluster_methods)
+
+ # add telemetry attributes - does not matter if we are using cluster or node here
+ # TODO: most if not all get and list APIs do not have an attributes parameter
+
+ def get_info(self, name):
+ '''
+ Get Element Info
+ run a cluster or node list method
+ return output as json
+ '''
+ info = None
+ if name not in self.methods:
+ msg = 'Error: unknown subset %s.' % name
+ msg += ' Known_subsets: %s' % ', '.join(self.methods.keys())
+ self.module.fail_json(msg=msg, debug=self.debug)
+ try:
+ info = self.methods[name]()
+ return info.to_json()
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ # the new SDK rearranged the fields in a different order
+ if all(x in str(exc) for x in ('err_json', '500', 'xUnknownAPIMethod', 'method=')):
+ info = 'Error (API not in scope?)'
+ else:
+ info = 'Error'
+ msg = '%s for subset: %s: %s' % (info, name, repr(exc))
+ if self.parameters['fail_on_error']:
+ self.module.fail_json(msg=msg)
+ self.debug.append(msg)
+ return info
+
+ def filter_list_of_dict_by_key(self, records, key, value):
+ matched = list()
+ for record in records:
+ if key in record and record[key] == value:
+ matched.append(record)
+ if key not in record and self.parameters['fail_on_key_not_found']:
+ msg = 'Error: key %s not found in %s' % (key, repr(record))
+ self.module.fail_json(msg=msg)
+ return matched
+
+ def filter_records(self, records, filter_dict):
+
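+        # SDK results are usually wrapped in a single-key dict (for example {'accounts': [...]}),
+        # so unwrap one level and recurse before filtering the list of records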
+ if isinstance(records, dict):
+ if len(records) == 1:
+ key, value = list(records.items())[0]
+ return dict({key: self.filter_records(value, filter_dict)})
+ if not isinstance(records, list):
+ return records
+ matched = records
+ for key, value in filter_dict.items():
+ matched = self.filter_list_of_dict_by_key(matched, key, value)
+ if self.parameters['fail_on_record_not_found'] and len(matched) == 0:
+ msg = 'Error: no match for %s out of %d records' % (repr(self.parameters['filter']), len(records))
+ self.debug.append('Unmatched records: %s' % repr(records))
+ self.module.fail_json(msg=msg, debug=self.debug)
+ return matched
+
+ def get_and_filter_info(self, name):
+ '''
+ Get data
+ If filter is present, only return the records that are matched
+ return output as json
+ '''
+ records = self.get_info(name)
+ if self.parameters.get('filter') is None:
+ return records
+ matched = self.filter_records(records, self.parameters.get('filter'))
+ return matched
+
+ def apply(self):
+ '''
+        Gather the requested subsets of cluster and node information
+ '''
+ changed = False
+ info = dict()
+ my_subsets = ('all', 'all_clusters', 'all_nodes')
+ if any(x in self.parameters['gather_subsets'] for x in my_subsets) and len(self.parameters['gather_subsets']) > 1:
+ msg = 'When any of %s is used, no other subset is allowed' % repr(my_subsets)
+ self.module.fail_json(msg=msg)
+ if 'all' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.methods.keys()
+ if 'all_clusters' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.cluster_methods.keys()
+ if 'all_nodes' in self.parameters['gather_subsets']:
+ self.parameters['gather_subsets'] = self.node_methods.keys()
+ for name in self.parameters['gather_subsets']:
+ info[name] = self.get_and_filter_info(name)
+ self.module.exit_json(changed=changed, info=info, debug=self.debug)
+
+
+def main():
+ '''
+ Main function
+ '''
+    na_elementsw_info = ElementSWInfo()
+    na_elementsw_info.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py
new file mode 100644
index 000000000..9bef345b4
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_initiators.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software manage initiators
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_initiators
+
+short_description: Manage Element SW initiators
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.8.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Manage Element Software initiators that allow external clients access to volumes.
+
+options:
+ initiators:
+ description: A list of objects containing characteristics of each initiator.
+ suboptions:
+ name:
+ description: The name of the initiator.
+ type: str
+ required: true
+
+ alias:
+ description: The friendly name assigned to this initiator.
+ type: str
+
+ initiator_id:
+ description: The numeric ID of the initiator.
+ type: int
+
+ volume_access_group_id:
+ description: volumeAccessGroupID to which this initiator belongs.
+ type: int
+
+ attributes:
+ description: A set of JSON attributes to assign to this initiator.
+ type: dict
+ type: list
+ elements: dict
+
+ state:
+ description:
+ - Whether the specified initiator should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+'''
+
+EXAMPLES = """
+
+ - name: Manage initiators
+ tags:
+ - na_elementsw_initiators
+ na_elementsw_initiators:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ initiators:
+ - name: a
+ alias: a1
+ initiator_id: 1
+ volume_access_group_id: 1
+ attributes: {"key": "value"}
+ - name: b
+ alias: b2
+ initiator_id: 2
+ volume_access_group_id: 2
+ state: present
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+if HAS_SF_SDK:
+ from solidfire.models import ModifyInitiator
+
+
+class ElementSWInitiators(object):
+ """
+ Element Software Manage Element SW initiators
+ """
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+
+ self.argument_spec.update(dict(
+ initiators=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ alias=dict(type='str', default=None),
+ initiator_id=dict(type='int', default=None),
+ volume_access_group_id=dict(type='int', default=None),
+ attributes=dict(type='dict', default=None),
+ )
+ ),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.debug = list()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # iterate over each user-provided initiator
+ for initiator in self.parameters.get('initiators'):
+ # add telemetry attributes
+ if 'attributes' in initiator and initiator['attributes']:
+ initiator['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators'))
+ else:
+ initiator['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_initiators')
+
+ def compare_initiators(self, user_initiator, existing_initiator):
+ """
+ compare user input initiator with existing dict
+        :return: True if a change is required (user input differs from the existing initiator), False otherwise
+ """
+ if user_initiator is None or existing_initiator is None:
+ return False
+ changed = False
+ for param in user_initiator:
+ # lookup initiator_name instead of name
+ if param == 'name':
+ if user_initiator['name'] == existing_initiator['initiator_name']:
+ pass
+ elif param == 'initiator_id':
+ # can't change the key
+ pass
+ elif user_initiator[param] == existing_initiator[param]:
+ pass
+ else:
+ self.debug.append('Initiator: %s. Changed: %s from: %s to %s' %
+ (user_initiator['name'], param, str(existing_initiator[param]), str(user_initiator[param])))
+ changed = True
+ return changed
+
+ def initiator_to_dict(self, initiator_obj):
+ """
+ converts initiator class object to dict
+ :return: reconstructed initiator dict
+ """
+ known_params = ['initiator_name',
+ 'alias',
+ 'initiator_id',
+ 'volume_access_groups',
+ 'volume_access_group_id',
+ 'attributes']
+ initiator_dict = {}
+
+        # a missing attribute on the SDK object would cause an error,
+        # so default each known parameter to None
+ for param in known_params:
+ initiator_dict[param] = getattr(initiator_obj, param, None)
+ if initiator_dict['volume_access_groups'] is not None:
+ if len(initiator_dict['volume_access_groups']) == 1:
+ initiator_dict['volume_access_group_id'] = initiator_dict['volume_access_groups'][0]
+ elif len(initiator_dict['volume_access_groups']) > 1:
+ self.module.fail_json(msg="Only 1 access group is supported, found: %s" % repr(initiator_obj))
+ del initiator_dict['volume_access_groups']
+ return initiator_dict
+
+ def find_initiator(self, id=None, name=None):
+ """
+ find a specific initiator
+ :return: initiator dict
+ """
+ initiator_details = None
+ if self.all_existing_initiators is None:
+ return initiator_details
+ for initiator in self.all_existing_initiators:
+ # if name is provided or
+ # if id is provided
+ if name is not None:
+ if initiator.initiator_name == name:
+ initiator_details = self.initiator_to_dict(initiator)
+ elif id is not None:
+ if initiator.initiator_id == id:
+ initiator_details = self.initiator_to_dict(initiator)
+ else:
+ # if neither id nor name provided
+ # return everything
+ initiator_details = self.all_existing_initiators
+ return initiator_details
+
+ @staticmethod
+ def rename_key(obj, old_name, new_name):
+ obj[new_name] = obj.pop(old_name)
+
+ def create_initiator(self, initiator):
+ """
+ create initiator
+ """
+ # SF SDK is using camelCase for this one
+ self.rename_key(initiator, 'volume_access_group_id', 'volumeAccessGroupID')
+ # create_initiators needs an array
+ initiator_list = [initiator]
+ try:
+ self.sfe.create_initiators(initiator_list)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error creating initiator %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def delete_initiator(self, initiator):
+ """
+ delete initiator
+ """
+ # delete_initiators needs an array
+ initiator_id_array = [initiator]
+ try:
+ self.sfe.delete_initiators(initiator_id_array)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error deleting initiator %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def modify_initiator(self, initiator, existing_initiator):
+ """
+ modify initiator
+ """
+ # create the new initiator dict
+ # by merging old and new values
+ merged_initiator = existing_initiator.copy()
+ # can't change the key
+ del initiator['initiator_id']
+ merged_initiator.update(initiator)
+
+ # we MUST create an object before sending
+ # the new initiator to modify_initiator
+ initiator_object = ModifyInitiator(initiator_id=merged_initiator['initiator_id'],
+ alias=merged_initiator['alias'],
+ volume_access_group_id=merged_initiator['volume_access_group_id'],
+ attributes=merged_initiator['attributes'])
+ initiator_list = [initiator_object]
+ try:
+ self.sfe.modify_initiators(initiators=initiator_list)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error modifying initiator: %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ configure initiators
+ """
+ changed = False
+ result_message = None
+
+ # get all user provided initiators
+ input_initiators = self.parameters.get('initiators')
+
+ # get all initiators
+ # store in a cache variable
+ self.all_existing_initiators = self.sfe.list_initiators().initiators
+
+ # iterate over each user-provided initiator
+ for in_initiator in input_initiators:
+ if self.parameters.get('state') == 'present':
+ # check if initiator_id is provided and exists
+ if 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \
+ self.find_initiator(id=in_initiator['initiator_id']) is not None:
+ if self.compare_initiators(in_initiator, self.find_initiator(id=in_initiator['initiator_id'])):
+ changed = True
+ result_message = 'modifying initiator(s)'
+ self.modify_initiator(in_initiator, self.find_initiator(id=in_initiator['initiator_id']))
+ # otherwise check if name is provided and exists
+ elif 'name' in in_initiator and in_initiator['name'] is not None and self.find_initiator(name=in_initiator['name']) is not None:
+ if self.compare_initiators(in_initiator, self.find_initiator(name=in_initiator['name'])):
+ changed = True
+ result_message = 'modifying initiator(s)'
+ self.modify_initiator(in_initiator, self.find_initiator(name=in_initiator['name']))
+ # this is a create op if initiator doesn't exist
+ else:
+ changed = True
+ result_message = 'creating initiator(s)'
+ self.create_initiator(in_initiator)
+ elif self.parameters.get('state') == 'absent':
+ # delete_initiators only processes ids
+ # so pass ids of initiators to method
+ if 'name' in in_initiator and in_initiator['name'] is not None and \
+ self.find_initiator(name=in_initiator['name']) is not None:
+ changed = True
+ result_message = 'deleting initiator(s)'
+ self.delete_initiator(self.find_initiator(name=in_initiator['name'])['initiator_id'])
+ elif 'initiator_id' in in_initiator and in_initiator['initiator_id'] is not None and \
+ self.find_initiator(id=in_initiator['initiator_id']) is not None:
+ changed = True
+ result_message = 'deleting initiator(s)'
+ self.delete_initiator(in_initiator['initiator_id'])
+ if self.module.check_mode is True:
+ result_message = "Check mode, skipping changes"
+ if self.debug:
+ result_message += ". %s" % self.debug
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_initiators = ElementSWInitiators()
+ na_elementsw_initiators.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py
new file mode 100644
index 000000000..a71ddf564
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_ldap.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_ldap
+
+short_description: NetApp Element Software Manage ldap admin users
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Enable or disable LDAP, and add LDAP users.
+
+options:
+
+ state:
+ description:
+        - Whether the LDAP configuration should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ authType:
+ description:
+ - Identifies which user authentication method to use.
+ choices: ['DirectBind', 'SearchAndBind']
+ type: str
+
+ groupSearchBaseDn:
+ description:
+ - The base DN of the tree to start the group search (will do a subtree search from here)
+ type: str
+
+ groupSearchType:
+ description:
+ - Controls the default group search filter used
+ choices: ['NoGroup', 'ActiveDirectory', 'MemberDN']
+ type: str
+
+ serverURIs:
+ description:
+ - A comma-separated list of LDAP server URIs
+ type: str
+
+ userSearchBaseDN:
+ description:
+ - The base DN of the tree to start the search (will do a subtree search from here)
+ type: str
+
+ searchBindDN:
+ description:
+        - A fully qualified DN to log in with to perform an LDAP search for the user (needs read access to the LDAP directory).
+ type: str
+
+ searchBindPassword:
+ description:
+ - The password for the searchBindDN account used for searching
+ type: str
+
+ userSearchFilter:
+ description:
+ - the LDAP Filter to use
+ type: str
+
+ userDNTemplate:
+ description:
+        - A string that is used to form a fully qualified user DN.
+ type: str
+
+ groupSearchCustomFilter:
+ description:
+ - For use with the CustomFilter Search type
+ type: str
+'''
+
+EXAMPLES = """
+ - name: disable ldap authentication
+ na_elementsw_ldap:
+ state: absent
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ hostname: "{{ hostname }}"
+
+ - name: Enable ldap authentication
+ na_elementsw_ldap:
+ state: present
+ username: "{{ admin username }}"
+ password: "{{ admin password }}"
+ hostname: "{{ hostname }}"
+ authType: DirectBind
+ serverURIs: ldap://svmdurlabesx01spd_ldapclnt
+ groupSearchType: MemberDN
+ userDNTemplate: uid=%USERNAME%,cn=users,cn=accounts,dc=corp,dc="{{ company name }}",dc=com
+
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except Exception:
+ HAS_SF_SDK = False
+
+
+class NetappElementLdap(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ authType=dict(type='str', choices=['DirectBind', 'SearchAndBind']),
+ groupSearchBaseDn=dict(type='str'),
+ groupSearchType=dict(type='str', choices=['NoGroup', 'ActiveDirectory', 'MemberDN']),
+ serverURIs=dict(type='str'),
+ userSearchBaseDN=dict(type='str'),
+ searchBindDN=dict(type='str'),
+ searchBindPassword=dict(type='str', no_log=True),
+ userSearchFilter=dict(type='str'),
+ userDNTemplate=dict(type='str'),
+ groupSearchCustomFilter=dict(type='str'),
+ )
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.authType = param['authType']
+ self.groupSearchBaseDn = param['groupSearchBaseDn']
+ self.groupSearchType = param['groupSearchType']
+ self.serverURIs = param['serverURIs']
+ if self.serverURIs is not None:
+ self.serverURIs = self.serverURIs.split(',')
+ self.userSearchBaseDN = param['userSearchBaseDN']
+ self.searchBindDN = param['searchBindDN']
+ self.searchBindPassword = param['searchBindPassword']
+ self.userSearchFilter = param['userSearchFilter']
+ self.userDNTemplate = param['userDNTemplate']
+ self.groupSearchCustomFilter = param['groupSearchCustomFilter']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_ldap_configuration(self):
+ """
+ Return ldap configuration if found
+
+ :return: Details about the ldap configuration. None if not found.
+ :rtype: solidfire.models.GetLdapConfigurationResult
+ """
+ ldap_config = self.sfe.get_ldap_configuration()
+ return ldap_config
+
+ def enable_ldap(self):
+ """
+ Enable LDAP
+ :return: nothing
+ """
+ try:
+ self.sfe.enable_ldap_authentication(self.serverURIs, auth_type=self.authType,
+ group_search_base_dn=self.groupSearchBaseDn,
+ group_search_type=self.groupSearchType,
+ group_search_custom_filter=self.groupSearchCustomFilter,
+ search_bind_dn=self.searchBindDN,
+ search_bind_password=self.searchBindPassword,
+ user_search_base_dn=self.userSearchBaseDN,
+ user_search_filter=self.userSearchFilter,
+ user_dntemplate=self.userDNTemplate)
+ except solidfire.common.ApiServerError as error:
+ self.module.fail_json(msg='Error enabling LDAP: %s' % (to_native(error)),
+ exception=traceback.format_exc())
+
+ def check_config(self, ldap_config):
+ """
+ Check to see if the ldap config has been modified.
+ :param ldap_config: The LDAP configuration
+ :return: False if the config is the same as the playbook, True if it is not
+ """
+ if self.authType != ldap_config.ldap_configuration.auth_type:
+ return True
+ if self.serverURIs != ldap_config.ldap_configuration.server_uris:
+ return True
+ if self.groupSearchBaseDn != ldap_config.ldap_configuration.group_search_base_dn:
+ return True
+ if self.groupSearchType != ldap_config.ldap_configuration.group_search_type:
+ return True
+ if self.groupSearchCustomFilter != ldap_config.ldap_configuration.group_search_custom_filter:
+ return True
+ if self.searchBindDN != ldap_config.ldap_configuration.search_bind_dn:
+ return True
+ if self.searchBindPassword != ldap_config.ldap_configuration.search_bind_password:
+ return True
+ if self.userSearchBaseDN != ldap_config.ldap_configuration.user_search_base_dn:
+ return True
+ if self.userSearchFilter != ldap_config.ldap_configuration.user_search_filter:
+ return True
+ if self.userDNTemplate != ldap_config.ldap_configuration.user_dntemplate:
+ return True
+ return False
+
+ def apply(self):
+ changed = False
+ ldap_config = self.get_ldap_configuration()
+ if self.state == 'absent':
+ if ldap_config and ldap_config.ldap_configuration.enabled:
+ changed = True
+ if self.state == 'present' and self.check_config(ldap_config):
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ self.enable_ldap()
+ elif self.state == 'absent':
+ self.sfe.disable_ldap_authentication()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetappElementLdap()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py
new file mode 100644
index 000000000..a9151a620
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_network_interfaces.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Network Interfaces - Bond 1G and 10G configuration
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_network_interfaces
+
+short_description: NetApp Element Software Configure Node Network Interfaces
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Configure Element SW Node Network Interfaces for Bond 1G and 10G IP addresses.
+  - This module does not create interfaces; it expects the interfaces to already exist and can only modify them.
+ - This module cannot set or modify the method (Loopback, manual, dhcp, static).
+ - This module is not idempotent and does not support check_mode.
+
+options:
+ method:
+ description:
+    - deprecated, this option would trigger an 'updated failed' error
+ type: str
+
+ ip_address_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ ip_address_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ subnet_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ subnet_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ gateway_address_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ gateway_address_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ mtu_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ mtu_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ dns_nameservers:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: list
+ elements: str
+
+ dns_search_domains:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: list
+ elements: str
+
+ bond_mode_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ bond_mode_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ lacp_1g:
+ description:
+ - deprecated, use bond_1g option.
+ type: str
+
+ lacp_10g:
+ description:
+ - deprecated, use bond_10g option.
+ type: str
+
+ virtual_network_tag:
+ description:
+ - deprecated, use bond_1g and bond_10g options.
+ type: str
+
+ bond_1g:
+ description:
+ - settings for the Bond1G interface.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IP address for the interface.
+ type: str
+ netmask:
+ description:
+ - subnet mask for the interface.
+ type: str
+ gateway:
+ description:
+ - IP router network address to send packets out of the local network.
+ type: str
+ mtu:
+ description:
+        - The largest packet size (in bytes) that the interface can transmit.
+ - Must be greater than or equal to 1500 bytes.
+ type: str
+ dns_nameservers:
+ description:
+ - List of addresses for domain name servers.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of DNS search domains.
+ type: list
+ elements: str
+ bond_mode:
+ description:
+ - Bonding mode.
+ choices: ['ActivePassive', 'ALB', 'LACP']
+ type: str
+ bond_lacp_rate:
+ description:
+ - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode.
+ - Slow - Packets are transmitted at 30 second intervals.
+        - Fast - Packets are transmitted at 1 second intervals.
+ choices: ['Fast', 'Slow']
+ type: str
+ virtual_network_tag:
+ description:
+ - The virtual network identifier of the interface (VLAN tag).
+ type: str
+
+ bond_10g:
+ description:
+ - settings for the Bond10G interface.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - IP address for the interface.
+ type: str
+ netmask:
+ description:
+ - subnet mask for the interface.
+ type: str
+ gateway:
+ description:
+ - IP router network address to send packets out of the local network.
+ type: str
+ mtu:
+ description:
+        - The largest packet size (in bytes) that the interface can transmit.
+ - Must be greater than or equal to 1500 bytes.
+ type: str
+ dns_nameservers:
+ description:
+ - List of addresses for domain name servers.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of DNS search domains.
+ type: list
+ elements: str
+ bond_mode:
+ description:
+ - Bonding mode.
+ choices: ['ActivePassive', 'ALB', 'LACP']
+ type: str
+ bond_lacp_rate:
+ description:
+ - Link Aggregation Control Protocol - useful only if LACP is selected as the Bond Mode.
+ - Slow - Packets are transmitted at 30 second intervals.
+        - Fast - Packets are transmitted at 1 second intervals.
+ choices: ['Fast', 'Slow']
+ type: str
+ virtual_network_tag:
+ description:
+ - The virtual network identifier of the interface (VLAN tag).
+ type: str
+
+'''
+
+EXAMPLES = """
+
+ - name: Set Node network interfaces configuration for Bond 1G and 10G properties
+ tags:
+ - elementsw_network_interfaces
+ na_elementsw_network_interfaces:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ bond_1g:
+ address: 10.253.168.131
+ netmask: 255.255.248.0
+ gateway: 10.253.168.1
+ mtu: '1500'
+ bond_mode: ActivePassive
+ dns_nameservers: dns1,dns2
+ dns_search: domain1,domain2
+ bond_10g:
+ address: 10.253.1.202
+ netmask: 255.255.255.192
+ gateway: 10.253.1.193
+ mtu: '9000'
+ bond_mode: LACP
+ bond_lacp_rate: Fast
+ virtual_network_tag: vnet_tag
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+try:
+ from solidfire.models import Network, NetworkConfig
+ from solidfire.common import ApiConnectionError as sf_ApiConnectionError, ApiServerError as sf_ApiServerError
+ HAS_SF_SDK = True
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWNetworkInterfaces(object):
+ """
+ Element Software Network Interfaces - Bond 1G and 10G Network configuration
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ method=dict(required=False, type='str'),
+ ip_address_1g=dict(required=False, type='str'),
+ ip_address_10g=dict(required=False, type='str'),
+ subnet_1g=dict(required=False, type='str'),
+ subnet_10g=dict(required=False, type='str'),
+ gateway_address_1g=dict(required=False, type='str'),
+ gateway_address_10g=dict(required=False, type='str'),
+ mtu_1g=dict(required=False, type='str'),
+ mtu_10g=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search_domains=dict(required=False, type='list', elements='str'),
+ bond_mode_1g=dict(required=False, type='str'),
+ bond_mode_10g=dict(required=False, type='str'),
+ lacp_1g=dict(required=False, type='str'),
+ lacp_10g=dict(required=False, type='str'),
+ virtual_network_tag=dict(required=False, type='str'),
+ bond_1g=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ mtu=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search=dict(required=False, type='list', elements='str'),
+ bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']),
+ bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']),
+ virtual_network_tag=dict(required=False, type='str'),
+ )),
+ bond_10g=dict(required=False, type='dict', options=dict(
+ address=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ mtu=dict(required=False, type='str'),
+ dns_nameservers=dict(required=False, type='list', elements='str'),
+ dns_search=dict(required=False, type='list', elements='str'),
+ bond_mode=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP']),
+ bond_lacp_rate=dict(required=False, type='str', choices=['Fast', 'Slow']),
+ virtual_network_tag=dict(required=False, type='str'),
+ )),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ input_params = self.module.params
+ self.fail_when_deprecated_options_are_set(input_params)
+
+ self.bond1g = input_params['bond_1g']
+ self.bond10g = input_params['bond_10g']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+        # increase timeout, as it may take 30 seconds when making a change
+ self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442, timeout=90)
+
+ def fail_when_deprecated_options_are_set(self, input_params):
+        ''' report an error and exit if any deprecated option is set '''
+
+ dparms_1g = [x for x in ('ip_address_1g', 'subnet_1g', 'gateway_address_1g', 'mtu_1g', 'bond_mode_1g', 'lacp_1g')
+ if input_params[x] is not None]
+ dparms_10g = [x for x in ('ip_address_10g', 'subnet_10g', 'gateway_address_10g', 'mtu_10g', 'bond_mode_10g', 'lacp_10g')
+ if input_params[x] is not None]
+ dparms_common = [x for x in ('dns_nameservers', 'dns_search_domains', 'virtual_network_tag')
+ if input_params[x] is not None]
+
+ error_msg = ''
+ if dparms_1g and dparms_10g:
+ error_msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.'
+ elif dparms_1g:
+ error_msg = 'Please use the new bond_1g option to configure the bond 1G interface.'
+ elif dparms_10g:
+ error_msg = 'Please use the new bond_10g option to configure the bond 10G interface.'
+ elif dparms_common:
+ error_msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.'
+ if input_params['method']:
+ error_msg = 'This module cannot set or change "method". ' + error_msg
+ dparms_common.append('method')
+ if error_msg:
+ error_msg += ' The following parameters are deprecated and cannot be used: '
+ dparms = dparms_1g
+ dparms.extend(dparms_10g)
+ dparms.extend(dparms_common)
+ error_msg += ', '.join(dparms)
+ self.module.fail_json(msg=error_msg)
+
+ def set_network_config(self, network_object):
+ """
+ set network configuration
+ """
+ try:
+ self.sfe.set_network_config(network=network_object)
+ except (sf_ApiConnectionError, sf_ApiServerError) as exception_object:
+            self.module.fail_json(msg='Error setting network config: %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+ def set_network_config_object(self, network_params):
+ ''' set SolidFire network config object '''
+ network_config = dict()
+ if network_params is not None:
+ for key in network_params:
+ if network_params[key] is not None:
+ network_config[key] = network_params[key]
+ if network_config:
+ return NetworkConfig(**network_config)
+ return None
+
+ def set_network_object(self):
+ """
+ Set Element SW Network object
+ :description: set Network object
+
+ :return: Network object
+ :rtype: object(Network object)
+ """
+ bond_1g_network = self.set_network_config_object(self.bond1g)
+ bond_10g_network = self.set_network_config_object(self.bond10g)
+ network_object = None
+ if bond_1g_network is not None or bond_10g_network is not None:
+ network_object = Network(bond1_g=bond_1g_network,
+ bond10_g=bond_10g_network)
+ return network_object
+
+ def apply(self):
+ """
+ Check connection and initialize node with cluster ownership
+ """
+ changed = False
+ result_message = None
+ network_object = self.set_network_object()
+ if network_object is not None:
+ if not self.module.check_mode:
+ self.set_network_config(network_object)
+ changed = True
+ else:
+ result_message = "Skipping changes, No change requested"
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ elementsw_network_interfaces = ElementSWNetworkInterfaces()
+ elementsw_network_interfaces.apply()
+
+
+if __name__ == '__main__':
+ main()
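As a reading aid (not part of the patch): a minimal sketch of how set_network_config_object() and set_network_object() above turn the bond_1g / bond_10g dictionaries into SolidFire SDK objects. Options that were not supplied arrive as None and are dropped, so only the requested settings reach SetNetworkConfig. The variable names and values below are hypothetical and assume the solidfire Python SDK is installed.

    # Illustrative sketch only; mirrors set_network_config_object()/set_network_object().
    from solidfire.models import Network, NetworkConfig

    bond_10g_params = {          # hypothetical module input for the bond_10g option
        'mtu': '9000',
        'bond_mode': 'LACP',
        'bond_lacp_rate': 'Fast',
        'address': None,         # options that were not provided arrive as None
    }

    # Drop None values so only explicitly requested settings are applied.
    config_kwargs = {key: value for key, value in bond_10g_params.items() if value is not None}
    bond_10g_network = NetworkConfig(**config_kwargs)

    # Only the Bond10G side is populated here; Bond1G is left untouched.
    network_object = Network(bond10_g=bond_10g_network)
    # An established connection would then call: sfe.set_network_config(network=network_object)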
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py
new file mode 100644
index 000000000..d1412f2d4
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_node.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element Software Node Operation
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_node
+
+short_description: NetApp Element Software Node Operation
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Add, remove cluster node on Element Software Cluster.
+ - Set cluster name on node.
+ - When using the preset_only option, hostname/username/password are required but not used.
+
+options:
+ state:
+ description:
+ - Element Software Storage Node operation state.
+ - present - To add pending node to participate in cluster data storage.
+ - absent - To remove node from active cluster. A node cannot be removed if active drives are present.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ node_ids:
+ description:
+ - List of IDs or Names or IP Addresses of nodes to add or remove.
+ - If cluster_name is set, node MIPs are required.
+ type: list
+ elements: str
+ required: true
+ aliases: ['node_id']
+
+ cluster_name:
+ description:
+ - If set, the current node configuration is updated with this name before adding the node to the cluster.
+    - This requires the node_ids to be specified as MIPs (Management IP Addresses).
+ type: str
+ version_added: 20.9.0
+
+ preset_only:
+ description:
+ - If true and state is 'present', set the cluster name for each node in node_ids, but do not add the nodes.
+ - They can be added using na_elementsw_cluster for initial cluster creation.
+ - If false, proceed with addition/removal.
+ type: bool
+ default: false
+ version_added: 20.9.0
+'''
+
+EXAMPLES = """
+ - name: Add node from pending to active cluster
+ tags:
+ - elementsw_add_node
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_id: sf4805-meg-03
+
+ - name: Remove active node from cluster
+ tags:
+ - elementsw_remove_node
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ node_id: 13
+
+ - name: Add node from pending to active cluster using node IP
+ tags:
+ - elementsw_add_node_ip
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_id: 10.109.48.65
+ cluster_name: sfcluster01
+
+ - name: Only set cluster name
+ tags:
+ - elementsw_add_node_ip
+ na_elementsw_node:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ node_ids: 10.109.48.65,10.109.48.66
+ cluster_name: sfcluster01
+ preset_only: true
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementSWNode(object):
+ """
+ Element SW Storage Node operations
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ node_ids=dict(required=True, type='list', elements='str', aliases=['node_id']),
+ cluster_name=dict(required=False, type='str'),
+ preset_only=dict(required=False, type='bool', default=False),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.node_ids = input_params['node_ids']
+ self.cluster_name = input_params['cluster_name']
+ self.preset_only = input_params['preset_only']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ elif not self.preset_only:
+ # Cluster connection is only needed for add/delete operations
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def check_node_has_active_drives(self, node_id=None):
+ """
+ Check if node has active drives attached to cluster
+        :description: Validate whether the node has active drives in the cluster
+
+ :return: True or False
+ :rtype: bool
+ """
+ if node_id is not None:
+ cluster_drives = self.sfe.list_drives()
+ for drive in cluster_drives.drives:
+ if drive.node_id == node_id and drive.status == "active":
+ return True
+ return False
+
+ @staticmethod
+ def extract_node_info(node_list):
+ summary = list()
+ for node in node_list:
+ node_dict = dict()
+ for key, value in vars(node).items():
+ if key in ['assigned_node_id', 'cip', 'mip', 'name', 'node_id', 'pending_node_id', 'sip']:
+ node_dict[key] = value
+ summary.append(node_dict)
+ return summary
+
+ def get_node_list(self):
+ """
+ Get Node List
+ :description: Find and retrieve node_ids from the active cluster
+
+ :return: None
+ :rtype: None
+ """
+ action_nodes_list = list()
+ if len(self.node_ids) > 0:
+ unprocessed_node_list = list(self.node_ids)
+ list_nodes = []
+ try:
+ all_nodes = self.sfe.list_all_nodes()
+ except netapp_utils.solidfire.common.ApiServerError as exception_object:
+ self.module.fail_json(msg='Error getting list of nodes from cluster: %s' % to_native(exception_object),
+ exception=traceback.format_exc())
+
+            # For an add operation, look up nodes in the pending nodes list;
+            # otherwise, traverse the nodes of the active cluster.
+ if self.state == "present":
+ list_nodes = all_nodes.pending_nodes
+ else:
+ list_nodes = all_nodes.nodes
+
+ for current_node in list_nodes:
+ if self.state == "absent" and \
+ (str(current_node.node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids):
+ if self.check_node_has_active_drives(current_node.node_id):
+ self.module.fail_json(msg='Error deleting node %s: node has active drives' % current_node.name)
+ else:
+ action_nodes_list.append(current_node.node_id)
+ if self.state == "present" and \
+ (str(current_node.pending_node_id) in self.node_ids or current_node.name in self.node_ids or current_node.mip in self.node_ids):
+ action_nodes_list.append(current_node.pending_node_id)
+
+ # report an error if state == present and node is unknown
+ if self.state == "present":
+ for current_node in all_nodes.nodes:
+ if str(current_node.node_id) in unprocessed_node_list:
+ unprocessed_node_list.remove(str(current_node.node_id))
+ elif current_node.name in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.name)
+ elif current_node.mip in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.mip)
+ for current_node in all_nodes.pending_nodes:
+ if str(current_node.pending_node_id) in unprocessed_node_list:
+ unprocessed_node_list.remove(str(current_node.pending_node_id))
+ elif current_node.name in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.name)
+ elif current_node.mip in unprocessed_node_list:
+ unprocessed_node_list.remove(current_node.mip)
+ if len(unprocessed_node_list) > 0:
+ summary = dict(
+ nodes=self.extract_node_info(all_nodes.nodes),
+ pending_nodes=self.extract_node_info(all_nodes.pending_nodes),
+ pending_active_nodes=self.extract_node_info(all_nodes.pending_active_nodes)
+ )
+ self.module.fail_json(msg='Error adding nodes %s: nodes not in pending or active lists: %s' %
+ (to_native(unprocessed_node_list), repr(summary)))
+ return action_nodes_list
+
+ def add_node(self, nodes_list=None):
+ """
+        Add nodes that are on the cluster's pending nodes list
+ """
+ try:
+ self.sfe.add_nodes(nodes_list, auto_install=True)
+ except Exception as exception_object:
+ self.module.fail_json(msg='Error adding nodes %s to cluster: %s' % (nodes_list, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def remove_node(self, nodes_list=None):
+ """
+ Remove active node from Cluster
+ """
+ try:
+ self.sfe.remove_nodes(nodes_list)
+ except Exception as exception_object:
+            self.module.fail_json(msg='Error removing nodes %s from cluster: %s' % (nodes_list, to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def set_cluster_name(self, node):
+ ''' set up cluster name for the node using its MIP '''
+ cluster = dict(cluster=self.cluster_name)
+ port = 442
+ try:
+ node_cx = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, hostname=node, port=port)
+ except netapp_utils.solidfire.common.ApiConnectionError as exc:
+ if str(exc) == "Bad Credentials":
+ msg = 'Most likely the node %s is already in a cluster.' % node
+ msg += ' Make sure to use valid node credentials for username and password.'
+ msg += ' Node reported: %s' % repr(exc)
+ else:
+ msg = 'Failed to create connection: %s' % repr(exc)
+ self.module.fail_json(msg=msg)
+ except Exception as exc:
+ self.module.fail_json(msg='Failed to connect to %s:%d - %s' % (node, port, to_native(exc)),
+ exception=traceback.format_exc())
+
+ try:
+ cluster_config = node_cx.get_cluster_config()
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ self.module.fail_json(msg='Error getting cluster config: %s' % to_native(exc),
+ exception=traceback.format_exc())
+
+ if cluster_config.cluster.cluster == self.cluster_name:
+ return False
+ if cluster_config.cluster.state == 'Active':
+ self.module.fail_json(msg="Error updating cluster name for node %s, already in 'Active' state"
+ % node, cluster_config=repr(cluster_config))
+ if self.module.check_mode:
+ return True
+
+ try:
+ node_cx.set_cluster_config(cluster)
+ except netapp_utils.solidfire.common.ApiServerError as exc:
+ self.module.fail_json(msg='Error updating cluster name: %s' % to_native(exc),
+ cluster_config=repr(cluster_config),
+ exception=traceback.format_exc())
+ return True
+
+ def apply(self):
+ """
+ Check, process and initiate Cluster Node operation
+ """
+ changed = False
+ updated_nodes = list()
+ result_message = ''
+ if self.state == "present" and self.cluster_name is not None:
+ for node in self.node_ids:
+ if self.set_cluster_name(node):
+ changed = True
+ updated_nodes.append(node)
+ if not self.preset_only:
+ # let's see if there is anything to add or remove
+ action_nodes_list = self.get_node_list()
+ action = None
+ if self.state == "present" and len(action_nodes_list) > 0:
+ changed = True
+ action = 'added'
+ if not self.module.check_mode:
+ self.add_node(action_nodes_list)
+ elif self.state == "absent" and len(action_nodes_list) > 0:
+ changed = True
+ action = 'removed'
+ if not self.module.check_mode:
+ self.remove_node(action_nodes_list)
+ if action:
+ result_message = 'List of %s nodes: %s - requested: %s' % (action, to_native(action_nodes_list), to_native(self.node_ids))
+ if updated_nodes:
+ result_message += '\n' if result_message else ''
+ result_message += 'List of updated nodes with %s: %s' % (self.cluster_name, updated_nodes)
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_node = ElementSWNode()
+ na_elementsw_node.apply()
+
+
+if __name__ == '__main__':
+ main()
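As a reading aid (not part of the patch): the per-node rename done by set_cluster_name() above reduces to the sketch below. node_cx stands for a per-node SDK connection on port 442 (as created via create_sf_connection in the module); the helper name is hypothetical.

    # Illustrative sketch only; mirrors the checks in set_cluster_name().
    def ensure_cluster_name(node_cx, desired_name):
        """Return True if the node's cluster name was changed, False if it was already set."""
        cluster_config = node_cx.get_cluster_config()
        if cluster_config.cluster.cluster == desired_name:
            return False        # already set - idempotent no-op
        if cluster_config.cluster.state == 'Active':
            raise RuntimeError('node already belongs to an active cluster; its name cannot be changed')
        node_cx.set_cluster_config({'cluster': desired_name})
        return True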
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py
new file mode 100644
index 000000000..9d9e16994
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_qos_policy.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software QOS Policy
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_qos_policy
+
+short_description: NetApp Element Software create/modify/rename/delete QOS Policy
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 20.9.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, modify, rename, or delete QOS policy on Element Software Cluster.
+
+options:
+
+ state:
+ description:
+ - Whether the specified QOS policy should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - Name or id for the QOS policy.
+ required: true
+ type: str
+
+ from_name:
+ description:
+ - Name or id for the QOS policy to be renamed.
+ type: str
+
+ qos:
+ description:
+    - The quality of service (QOS) for the policy.
+    - Required for create.
+    - Supported keys are minIOPS, maxIOPS, burstIOPS.
+ type: dict
+ suboptions:
+ minIOPS:
+ description: The minimum number of IOPS guaranteed for the volume.
+ type: int
+ version_added: 21.3.0
+ maxIOPS:
+ description: The maximum number of IOPS allowed for the volume.
+ type: int
+ version_added: 21.3.0
+ burstIOPS:
+ description: The maximum number of IOPS allowed over a short period of time for the volume.
+ type: int
+ version_added: 21.3.0
+ debug:
+ description: report additional information when set to true.
+ type: bool
+ default: false
+ version_added: 21.3.0
+'''
+
+EXAMPLES = """
+ - name: Add QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: gold
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+
+ - name: Modify QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+      state: present
+ name: gold
+ qos: {minIOPS: 100, maxIOPS: 5000, burstIOPS: 20000}
+
+ - name: Rename QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+      state: present
+ from_name: gold
+ name: silver
+
+ - name: Remove QOS Policy
+ na_elementsw_qos_policy:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: silver
+"""
+
+
+RETURN = """
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWQosPolicy(object):
+ """
+ Element Software QOS Policy
+ """
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ from_name=dict(required=False, type='str'),
+ qos=dict(required=False, type='dict', options=dict(
+ minIOPS=dict(type='int'),
+ maxIOPS=dict(type='int'),
+ burstIOPS=dict(type='int'),
+ )),
+ debug=dict(required=False, type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ # Set up state variables
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ self.qos_policy_id = None
+ self.debug = dict()
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_qos_policy')
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ """
+ policy, error = self.elementsw_helper.get_qos_policy(name)
+ if error is not None:
+ self.module.fail_json(msg=error, exception=traceback.format_exc())
+ self.debug['current_policy'] = policy
+ return policy
+
+ def create_qos_policy(self, name, qos):
+ """
+ Create the QOS Policy
+ """
+ try:
+ self.sfe.create_qos_policy(name=name, qos=qos)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error creating qos policy: %s: %s" %
+ (name, to_native(exc)), exception=traceback.format_exc())
+
+ def update_qos_policy(self, qos_policy_id, modify, name=None):
+ """
+ Update the QOS Policy if the policy already exists
+ """
+ options = dict(
+ qos_policy_id=qos_policy_id
+ )
+ if name is not None:
+ options['name'] = name
+ if 'qos' in modify:
+ options['qos'] = modify['qos']
+
+ try:
+ self.sfe.modify_qos_policy(**options)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error updating qos policy: %s: %s" %
+ (self.parameters['from_name'] if name is not None else self.parameters['name'], to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_qos_policy(self, qos_policy_id):
+ """
+ Delete the QOS Policy
+ """
+ try:
+ self.sfe.delete_qos_policy(qos_policy_id=qos_policy_id)
+ except (solidfire.common.ApiServerError, solidfire.common.ApiConnectionError) as exc:
+ self.module.fail_json(msg="Error deleting qos policy: %s: %s" %
+ (self.parameters['name'], to_native(exc)), exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Process the create/delete/rename/modify actions for qos policy on the Element Software Cluster
+ """
+ modify = dict()
+ current = self.get_qos_policy(self.parameters['name'])
+ qos_policy_id = None if current is None else current['qos_policy_id']
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ if cd_action == 'create' and self.parameters.get('from_name') is not None:
+ from_qos_policy = self.get_qos_policy(self.parameters['from_name'])
+ if from_qos_policy is None:
+ self.module.fail_json(msg="Error renaming qos policy, no existing policy with name/id: %s" % self.parameters['from_name'])
+ cd_action = 'rename'
+ qos_policy_id = from_qos_policy['qos_policy_id']
+ self.na_helper.changed = True
+ modify = self.na_helper.get_modified_attributes(from_qos_policy, self.parameters)
+ if cd_action == 'create' and 'qos' not in self.parameters:
+ self.module.fail_json(msg="Error creating qos policy: %s, 'qos:' option is required" % self.parameters['name'])
+ self.debug['modify'] = modify
+
+ if not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_qos_policy(self.parameters['name'], self.parameters['qos'])
+ elif cd_action == 'delete':
+ self.delete_qos_policy(qos_policy_id)
+ elif cd_action == 'rename':
+ self.update_qos_policy(qos_policy_id, modify, name=self.parameters['name'])
+ elif modify:
+ self.update_qos_policy(qos_policy_id, modify)
+
+ results = dict(changed=self.na_helper.changed)
+ if self.parameters['debug']:
+ results['debug'] = self.debug
+ self.module.exit_json(**results)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_qos_policy = ElementSWQosPolicy()
+ na_elementsw_qos_policy.apply()
+
+
+if __name__ == '__main__':
+ main()
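As a reading aid (not part of the patch): the create/rename decision taken in apply() above boils down to the sketch below; lookup stands for a function such as get_qos_policy() and all names are hypothetical.

    # Illustrative sketch only; summarises the cd_action/from_name handling in apply().
    def choose_action(lookup, name, from_name=None):
        """Return 'create', 'rename', or None when the target policy already exists."""
        if lookup(name) is not None:
            return None          # policy is present; at most a modify is needed
        if from_name is not None and lookup(from_name) is not None:
            return 'rename'      # modify_qos_policy() is then called with the new name
        return 'create'          # create_qos_policy() additionally requires the qos option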
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py
new file mode 100644
index 000000000..23144e42e
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+Element OS Software Snapshot Manager
+'''
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot
+
+short_description: NetApp Element Software Manage Snapshots
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+ - Create, Modify or Delete Snapshot on Element OS Cluster.
+
+options:
+ name:
+ description:
+    - Name of the new snapshot to create.
+    - If unspecified, the date and time when the snapshot was taken is used.
+ type: str
+
+ state:
+ description:
+ - Whether the specified snapshot should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ src_volume_id:
+ description:
+ - ID or Name of active volume.
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID or Name of Parent/Source Volume.
+ required: true
+ type: str
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ type: str
+
+ src_snapshot_id:
+ description:
+ - ID or Name of an existing snapshot.
+ - Required when C(state=present), to modify snapshot properties.
+ - Required when C(state=present), to create snapshot from another snapshot in the volume.
+ - Required when C(state=absent), to delete snapshot.
+ type: str
+
+ enable_remote_replication:
+ description:
+    - Whether to replicate the created snapshot to a remote replication cluster.
+    - Set to true to enable.
+ type: bool
+
+ snap_mirror_label:
+ description:
+ - Label used by SnapMirror software to specify snapshot retention policy on SnapMirror endpoint.
+ type: str
+
+ expiration_time:
+ description:
+ - The date and time (format ISO 8601 date string) at which this snapshot will expire.
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create snapshot
+ tags:
+ - elementsw_create_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ src_volume_id: 118
+ account_id: sagarsh
+ name: newsnapshot-1
+
+ - name: Modify Snapshot
+ tags:
+ - elementsw_modify_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ src_volume_id: sagarshansivolume
+ src_snapshot_id: test1
+ account_id: sagarsh
+ expiration_time: '2018-06-16T12:24:56Z'
+ enable_remote_replication: false
+
+ - name: Delete Snapshot
+ tags:
+ - elementsw_delete_snapshot
+ na_elementsw_snapshot:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ src_snapshot_id: deltest1
+ account_id: sagarsh
+ src_volume_id: sagarshansivolume
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSSnapshot(object):
+ """
+ Element OS Snapshot Manager
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ account_id=dict(required=True, type='str'),
+ name=dict(required=False, type='str'),
+ src_volume_id=dict(required=True, type='str'),
+ retention=dict(required=False, type='str'),
+ src_snapshot_id=dict(required=False, type='str'),
+ enable_remote_replication=dict(required=False, type='bool'),
+ expiration_time=dict(required=False, type='str'),
+ snap_mirror_label=dict(required=False, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.state = input_params['state']
+ self.name = input_params['name']
+ self.account_id = input_params['account_id']
+ self.src_volume_id = input_params['src_volume_id']
+ self.src_snapshot_id = input_params['src_snapshot_id']
+ self.retention = input_params['retention']
+ self.properties_provided = False
+
+ self.expiration_time = input_params['expiration_time']
+ if input_params['expiration_time'] is not None:
+ self.properties_provided = True
+
+ self.enable_remote_replication = input_params['enable_remote_replication']
+ if input_params['enable_remote_replication'] is not None:
+ self.properties_provided = True
+
+ self.snap_mirror_label = input_params['snap_mirror_label']
+ if input_params['snap_mirror_label'] is not None:
+ self.properties_provided = True
+
+ if self.state == 'absent' and self.src_snapshot_id is None:
+ self.module.fail_json(
+ msg="Please provide required parameter : snapshot_id")
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_src_volume_id(self):
+ """
+ Return volume id if found
+ """
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+ if src_vol_id is not None:
+ # Update and return self.volume_id
+ self.src_volume_id = src_vol_id
+ # Return src_volume_id
+ return self.src_volume_id
+ return None
+
+ def get_snapshot(self, name=None):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = None
+ if name is not None:
+ src_snapshot = self.elementsw_helper.get_snapshot(name, self.src_volume_id)
+ elif self.src_snapshot_id is not None:
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ if src_snapshot is not None:
+ # Update self.src_snapshot_id
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return src_snapshot
+ return src_snapshot
+
+ def create_snapshot(self):
+ """
+ Create Snapshot
+ """
+ try:
+ self.sfe.create_snapshot(volume_id=self.src_volume_id,
+ snapshot_id=self.src_snapshot_id,
+ name=self.name,
+ enable_remote_replication=self.enable_remote_replication,
+ retention=self.retention,
+ snap_mirror_label=self.snap_mirror_label,
+ attributes=self.attributes)
+ except Exception as exception_object:
+ self.module.fail_json(
+ msg='Error creating snapshot %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def modify_snapshot(self):
+ """
+ Modify Snapshot Properties
+ """
+ try:
+ self.sfe.modify_snapshot(snapshot_id=self.src_snapshot_id,
+ expiration_time=self.expiration_time,
+ enable_remote_replication=self.enable_remote_replication,
+ snap_mirror_label=self.snap_mirror_label)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error modifying snapshot %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def delete_snapshot(self):
+ """
+ Delete Snapshot
+ """
+ try:
+ self.sfe.delete_snapshot(snapshot_id=self.src_snapshot_id)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error deleting snapshot %s' % (
+ to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate snapshot operation
+ """
+ changed = False
+ result_message = None
+ self.get_account_id()
+
+        # Don't proceed if the source volume is not found
+ if self.get_src_volume_id() is None:
+ self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id)
+
+ # Get snapshot details using source volume
+ snapshot_detail = self.get_snapshot()
+
+ if snapshot_detail:
+ if self.properties_provided:
+ if self.expiration_time != snapshot_detail.expiration_time:
+ changed = True
+ else: # To preserve value in case parameter expiration_time is not defined/provided.
+ self.expiration_time = snapshot_detail.expiration_time
+
+ if self.enable_remote_replication != snapshot_detail.enable_remote_replication:
+ changed = True
+                else:  # To preserve value in case parameter enable_remote_replication is not defined/provided.
+ self.enable_remote_replication = snapshot_detail.enable_remote_replication
+
+ if self.snap_mirror_label != snapshot_detail.snap_mirror_label:
+ changed = True
+ else: # To preserve value in case parameter snap_mirror_label is not defined/provided.
+ self.snap_mirror_label = snapshot_detail.snap_mirror_label
+
+ if self.account_id is None or self.src_volume_id is None or self.module.check_mode:
+ changed = False
+ result_message = "Check mode, skipping changes"
+ elif self.state == 'absent' and snapshot_detail is not None:
+ self.delete_snapshot()
+ changed = True
+ elif self.state == 'present' and snapshot_detail is not None:
+ if changed:
+ self.modify_snapshot() # Modify Snapshot properties
+ elif not self.properties_provided:
+ if self.name is not None:
+ snapshot = self.get_snapshot(self.name)
+ # If snapshot with name already exists return without performing any action
+ if snapshot is None:
+ self.create_snapshot() # Create Snapshot using parent src_snapshot_id
+ changed = True
+ else:
+ self.create_snapshot()
+ changed = True
+ elif self.state == 'present':
+ if self.name is not None:
+ snapshot = self.get_snapshot(self.name)
+ # If snapshot with name already exists return without performing any action
+ if snapshot is None:
+ self.create_snapshot() # Create Snapshot using parent src_snapshot_id
+ changed = True
+ else:
+ self.create_snapshot()
+ changed = True
+ else:
+ changed = False
+ result_message = "No changes requested, skipping changes"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+
+ na_elementsw_snapshot = ElementOSSnapshot()
+ na_elementsw_snapshot.apply()
+
+
+if __name__ == '__main__':
+ main()
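As a reading aid (not part of the patch): the idempotency rule apply() above follows before creating a snapshot, written as a hypothetical standalone helper; get_snapshot stands for NaElementSWModule.get_snapshot().

    # Illustrative sketch only; a named snapshot is created only if that name is still free.
    def needs_create(get_snapshot, name, volume_id):
        """Create unconditionally when no name is given, otherwise only when the name does not exist yet."""
        return name is None or get_snapshot(name, volume_id) is None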
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py
new file mode 100644
index 000000000..1e9d8e59a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_restore.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Element Software Snapshot Restore
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot_restore
+
+short_description: NetApp Element Software Restore Snapshot
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Element OS Cluster restore snapshot to volume.
+
+options:
+
+ src_volume_id:
+ description:
+ - ID or Name of source active volume.
+ required: true
+ type: str
+
+ src_snapshot_id:
+ description:
+ - ID or Name of an existing snapshot.
+ required: true
+ type: str
+
+ dest_volume_name:
+ description:
+    - New name of the destination volume for restoring the snapshot.
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID or Name of Parent/Source Volume.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Restore snapshot to volume
+ tags:
+ - elementsw_create_snapshot_restore
+ na_elementsw_snapshot_restore:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ account_id: ansible-1
+ src_snapshot_id: snapshot_20171021
+ src_volume_id: volume-playarea
+ dest_volume_name: dest-volume-area
+
+"""
+
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSSnapshotRestore(object):
+ """
+ Element OS Restore from snapshot
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ account_id=dict(required=True, type='str'),
+ src_volume_id=dict(required=True, type='str'),
+ dest_volume_name=dict(required=True, type='str'),
+ src_snapshot_id=dict(required=True, type='str')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ input_params = self.module.params
+
+ self.account_id = input_params['account_id']
+ self.src_volume_id = input_params['src_volume_id']
+ self.dest_volume_name = input_params['dest_volume_name']
+ self.src_snapshot_id = input_params['src_snapshot_id']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_snapshot_restore')
+
+ def get_account_id(self):
+ """
+ Get account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_snapshot_id(self):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ # Update and return self.src_snapshot_id
+ if src_snapshot:
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return self.src_snapshot_id
+ return self.src_snapshot_id
+ return None
+
+ def restore_snapshot(self):
+ """
+ Restore Snapshot to Volume
+ """
+ try:
+ self.sfe.clone_volume(volume_id=self.src_volume_id,
+ name=self.dest_volume_name,
+ snapshot_id=self.src_snapshot_id,
+ attributes=self.attributes)
+ except Exception as exception_object:
+ self.module.fail_json(
+                msg='Error restoring snapshot %s' % (to_native(exception_object)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ """
+ Check, process and initiate restore snapshot to volume operation
+ """
+ changed = False
+ result_message = None
+ self.get_account_id()
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+
+ if src_vol_id is not None:
+ # Update self.src_volume_id
+ self.src_volume_id = src_vol_id
+ if self.get_snapshot_id() is not None:
+                # Address idempotency by checking that no volume with the destination name already exists
+ if self.elementsw_helper.volume_exists(self.dest_volume_name, self.account_id) is None:
+ self.restore_snapshot()
+ changed = True
+ else:
+ result_message = "No changes requested, Skipping changes"
+ else:
+ self.module.fail_json(msg="Snapshot id not found %s" % self.src_snapshot_id)
+ else:
+ self.module.fail_json(msg="Volume id not found %s" % self.src_volume_id)
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """
+ Main function
+ """
+ na_elementsw_snapshot_restore = ElementOSSnapshotRestore()
+ na_elementsw_snapshot_restore.apply()
+
+
+if __name__ == '__main__':
+ main()
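As a reading aid (not part of the patch): the restore itself is a single SDK call, sketched below; sfe stands for an established cluster connection and the wrapper name is hypothetical.

    # Illustrative sketch only; CloneVolume creates the destination volume from the snapshot.
    def restore_snapshot_to_volume(sfe, src_volume_id, src_snapshot_id, dest_volume_name):
        """Clone a new volume from an existing snapshot of the source volume."""
        return sfe.clone_volume(volume_id=src_volume_id,
                                name=dest_volume_name,
                                snapshot_id=src_snapshot_id)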
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py
new file mode 100644
index 000000000..2ace1bd4b
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_snapshot_schedule.py
@@ -0,0 +1,586 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element SW Software Snapshot Schedule"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_snapshot_schedule
+
+short_description: NetApp Element Software Snapshot Schedules
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on ElementSW
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ type: bool
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ type: bool
+
+ schedule_type:
+ description:
+ - Schedule type for creating schedule.
+ choices: ['DaysOfWeekFrequency','DaysOfMonthFrequency','TimeIntervalFrequency']
+ type: str
+
+ time_interval_days:
+ description: Time interval in days.
+ type: int
+
+ time_interval_hours:
+ description: Time interval in hours.
+ type: int
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ type: int
+
+ days_of_week_weekdays:
+ description: List of days of the week (Sunday to Saturday)
+ type: list
+ elements: str
+
+ days_of_week_hours:
+ description: Time specified in hours
+ type: int
+
+ days_of_week_minutes:
+ description: Time specified in minutes.
+ type: int
+
+ days_of_month_monthdays:
+ description: List of days of the month (1-31)
+ type: list
+ elements: int
+
+ days_of_month_hours:
+ description: Time specified in hours
+ type: int
+
+ days_of_month_minutes:
+ description: Time specified in minutes.
+ type: int
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+    - It accepts either a schedule_id or a schedule_name.
+    - If name is a digit, it is treated as a schedule_id.
+    - If name is a string, it is treated as a schedule_name.
+ required: true
+ type: str
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ type: str
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+ - It accepts both volume_name and volume_id
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+    - It accepts either an account_name or an account_id.
+    - If account_id is a digit, it is treated as an account_id.
+    - If account_id is a string, it is treated as an account_name.
+ type: str
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ type: str
+
+ starting_date:
+ description:
+ - Starting date for the schedule.
+ - Required when C(state=present).
+ - "Format: C(2016-12-01T00:00:00Z)"
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: Schedule_A
+ schedule_type: TimeIntervalFrequency
+ time_interval_days: 1
+ starting_date: '2016-12-01T00:00:00Z'
+ retention: '24:00:00'
+ volumes:
+ - 7
+ - test
+ account_id: 1
+
+ - name: Update Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: Schedule_A
+ schedule_type: TimeIntervalFrequency
+ time_interval_days: 1
+ starting_date: '2016-12-01T00:00:00Z'
+ retention: '24:00:00'
+ volumes:
+ - 8
+ - test1
+ account_id: 1
+
+ - name: Delete Snapshot schedule
+ na_elementsw_snapshot_schedule:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: 6
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ from solidfire.custom.models import DaysOfWeekFrequency, Weekday, DaysOfMonthFrequency
+ from solidfire.common import ApiConnectionError, ApiServerError
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+except ImportError:
+ HAS_SF_SDK = False
+
+try:
+    # Hack to see if we have the 1.7 version of the SDK, or later
+ from solidfire.common.model import VER3
+ HAS_SF_SDK_1_7 = True
+ del VER3
+except ImportError:
+ HAS_SF_SDK_1_7 = False
+
+
+class ElementSWSnapShotSchedule(object):
+ """
+ Contains methods to parse arguments,
+ derive details of ElementSW objects
+ and send requests to ElementSW via
+ the ElementSW SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure the SDK is installed
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ schedule_type=dict(required=False, choices=['DaysOfWeekFrequency', 'DaysOfMonthFrequency', 'TimeIntervalFrequency']),
+
+ time_interval_days=dict(required=False, type='int'),
+ time_interval_hours=dict(required=False, type='int'),
+ time_interval_minutes=dict(required=False, type='int'),
+
+ days_of_week_weekdays=dict(required=False, type='list', elements='str'),
+ days_of_week_hours=dict(required=False, type='int'),
+ days_of_week_minutes=dict(required=False, type='int'),
+
+ days_of_month_monthdays=dict(required=False, type='list', elements='int'),
+ days_of_month_hours=dict(required=False, type='int'),
+ days_of_month_minutes=dict(required=False, type='int'),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(required=False, type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list', elements='str'),
+ account_id=dict(required=False, type='str'),
+ retention=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['account_id', 'volumes', 'schedule_type']),
+ ('schedule_type', 'DaysOfMonthFrequency', ['days_of_month_monthdays']),
+ ('schedule_type', 'DaysOfWeekFrequency', ['days_of_week_weekdays'])
+
+ ],
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.name = param['name']
+ self.schedule_type = param['schedule_type']
+ self.days_of_week_weekdays = param['days_of_week_weekdays']
+ self.days_of_week_hours = param['days_of_week_hours']
+ self.days_of_week_minutes = param['days_of_week_minutes']
+ self.days_of_month_monthdays = param['days_of_month_monthdays']
+ self.days_of_month_hours = param['days_of_month_hours']
+ self.days_of_month_minutes = param['days_of_month_minutes']
+ self.time_interval_days = param['time_interval_days']
+ self.time_interval_hours = param['time_interval_hours']
+ self.time_interval_minutes = param['time_interval_minutes']
+ self.paused = param['paused']
+ self.recurring = param['recurring']
+ if self.schedule_type == 'DaysOfWeekFrequency':
+            # Create the self.weekdays list when schedule_type is DaysOfWeekFrequency
+            if self.days_of_week_weekdays is not None:
+ self.weekdays = []
+ for day in self.days_of_week_weekdays:
+ if str(day).isdigit():
+ # If id specified, return appropriate day
+ self.weekdays.append(Weekday.from_id(int(day)))
+ else:
+ # If name specified, return appropriate day
+ self.weekdays.append(Weekday.from_name(day.capitalize()))
+
+ if self.state == 'present' and self.schedule_type is None:
+ # Mandate schedule_type for create operation
+ self.module.fail_json(
+ msg="Please provide required parameter: schedule_type")
+
+ # Mandate schedule name for delete operation
+ if self.state == 'absent' and self.name is None:
+ self.module.fail_json(
+ msg="Please provide required parameter: name")
+
+ self.starting_date = param['starting_date']
+ self.snapshot_name = param['snapshot_name']
+ self.volumes = param['volumes']
+ self.account_id = param['account_id']
+ self.retention = param['retention']
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+            self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+        else:
+            # Create ElementSW connection
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ def get_schedule(self):
+        # Check whether the schedule exists, looking it up by id or by name
+        # Return schedule details if found, None otherwise
+        # If found by id, set self.name to the schedule's name
+ try:
+ schedule_list = self.sfe.list_schedules()
+ except ApiServerError:
+ return None
+
+ for schedule in schedule_list.schedules:
+ if schedule.to_be_deleted:
+                # skip this schedule if it is being deleted; it might as well not exist
+ continue
+ if str(schedule.schedule_id) == self.name:
+ self.name = schedule.name
+ return schedule
+ elif schedule.name == self.name:
+ return schedule
+ return None
+
+ def get_account_id(self):
+ # Validate account id
+ # Return account_id if found, None otherwise
+ try:
+ account_id = self.elementsw_helper.account_exists(self.account_id)
+ return account_id
+ except ApiServerError:
+ return None
+
+ def get_volume_id(self):
+ # Validate volume_ids
+ # Return volume ids if found, fail if not found
+ volume_ids = []
+ for volume in self.volumes:
+ volume_id = self.elementsw_helper.volume_exists(volume.strip(), self.account_id)
+ if volume_id:
+ volume_ids.append(volume_id)
+ else:
+ self.module.fail_json(msg='Specified volume %s does not exist' % volume)
+ return volume_ids
+
+ def get_frequency(self):
+        # Build the frequency object according to self.schedule_type
+ frequency = None
+        if self.schedule_type == 'DaysOfWeekFrequency':
+ if self.weekdays is not None:
+ params = dict(weekdays=self.weekdays)
+ if self.days_of_week_hours is not None:
+ params['hours'] = self.days_of_week_hours
+ if self.days_of_week_minutes is not None:
+ params['minutes'] = self.days_of_week_minutes
+ frequency = DaysOfWeekFrequency(**params)
+        elif self.schedule_type == 'DaysOfMonthFrequency':
+ if self.days_of_month_monthdays is not None:
+ params = dict(monthdays=self.days_of_month_monthdays)
+ if self.days_of_month_hours is not None:
+ params['hours'] = self.days_of_month_hours
+ if self.days_of_month_minutes is not None:
+ params['minutes'] = self.days_of_month_minutes
+ frequency = DaysOfMonthFrequency(**params)
+        elif self.schedule_type == 'TimeIntervalFrequency':
+ params = dict()
+ if self.time_interval_days is not None:
+ params['days'] = self.time_interval_days
+ if self.time_interval_hours is not None:
+ params['hours'] = self.time_interval_hours
+ if self.time_interval_minutes is not None:
+ params['minutes'] = self.time_interval_minutes
+ if not params or sum(params.values()) == 0:
+                self.module.fail_json(msg='Specify at least one non-zero value with TimeIntervalFrequency.')
+ frequency = TimeIntervalFrequency(**params)
+ return frequency
+
+ def is_same_schedule_type(self, schedule_detail):
+        # Check whether the existing schedule's frequency type matches the requested schedule_type
+        return str(schedule_detail.frequency).split('(', maxsplit=1)[0] == self.schedule_type
+
+ def create_schedule(self):
+ # Create schedule
+ try:
+ frequency = self.get_frequency()
+ if frequency is None:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+ # Create schedule
+ name = self.name
+ schedule_info = ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
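+            # Schedule() argument order differs between SDK releases: with SDK 1.7+ frequency comes first, older SDKs expect schedule_info first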
+ if HAS_SF_SDK_1_7:
+ sched = Schedule(frequency, name, schedule_info)
+ else:
+ sched = Schedule(schedule_info, name, frequency)
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(sched)
+
+ except (ApiServerError, ApiConnectionError) as exc:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self, schedule_id):
+ # delete schedule
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except (ApiServerError, ApiConnectionError) as exc:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self, schedule_id):
+ # Update schedule
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=schedule_id)
+ sched = get_schedule_result.schedule
+ # Update schedule properties
+ sched.frequency = self.get_frequency()
+ if sched.frequency is None:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+ if self.volumes is not None and len(self.volumes) > 0:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except (ApiServerError, ApiConnectionError) as exc:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(exc)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ # Perform pre-checks, call functions and exit
+
+ changed = False
+ update_schedule = False
+
+ if self.account_id is not None:
+ self.account_id = self.get_account_id()
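+            # get_account_id() returns None when the account cannot be found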
+
+ if self.state == 'present' and self.volumes is not None:
+ if self.account_id:
+ self.volumes = self.get_volume_id()
+ else:
+ self.module.fail_json(msg='Specified account id does not exist')
+
+ # Getting the schedule details
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail is None and self.state == 'present':
+ if len(self.volumes) > 0:
+ changed = True
+ else:
+ self.module.fail_json(msg='Specified volumes not on cluster')
+ elif schedule_detail is not None:
+ # Getting the schedule id
+ if self.state == 'absent':
+ changed = True
+ else:
+ # Check if we need to update the snapshot schedule
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+ elif self.volumes is not None and len(self.volumes) > 0:
+ for volume_id in schedule_detail.schedule_info.volume_ids:
+ if volume_id not in self.volumes:
+ update_schedule = True
+ changed = True
+
+ temp_frequency = self.get_frequency()
+ if temp_frequency is not None:
+ # Checking schedule_type changes
+ if self.is_same_schedule_type(schedule_detail):
+ # If same schedule type
+ if self.schedule_type == "TimeIntervalFrequency":
+                            # Check for changes in schedule.frequency when schedule_type is TimeIntervalFrequency
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif self.schedule_type == "DaysOfMonthFrequency":
+                            # Check for changes in schedule.frequency when schedule_type is DaysOfMonthFrequency
+ if len(schedule_detail.frequency.monthdays) != len(temp_frequency.monthdays) or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif len(schedule_detail.frequency.monthdays) == len(temp_frequency.monthdays):
+ actual_frequency_monthday = schedule_detail.frequency.monthdays
+ temp_frequency_monthday = temp_frequency.monthdays
+ for monthday in actual_frequency_monthday:
+ if monthday not in temp_frequency_monthday:
+ update_schedule = True
+ changed = True
+ elif self.schedule_type == "DaysOfWeekFrequency":
+                            # Check for changes in schedule.frequency when schedule_type is DaysOfWeekFrequency
+ if len(schedule_detail.frequency.weekdays) != len(temp_frequency.weekdays) or \
+ schedule_detail.frequency.hours != temp_frequency.hours or \
+ schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+ elif len(schedule_detail.frequency.weekdays) == len(temp_frequency.weekdays):
+ actual_frequency_weekdays = schedule_detail.frequency.weekdays
+ temp_frequency_weekdays = temp_frequency.weekdays
+ if len([actual_weekday for actual_weekday, temp_weekday in
+ zip(actual_frequency_weekdays, temp_frequency_weekdays) if actual_weekday != temp_weekday]) != 0:
+ update_schedule = True
+ changed = True
+ else:
+ update_schedule = True
+ changed = True
+ else:
+ self.module.fail_json(msg='Failed to create schedule frequency object - type %s parameters' % self.schedule_type)
+
+ result_message = " "
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if update_schedule:
+ self.update_schedule(schedule_detail.schedule_id)
+ result_message = "Snapshot Schedule modified"
+ else:
+ self.create_schedule()
+ result_message = "Snapshot Schedule created"
+ elif self.state == 'absent':
+ self.delete_schedule(schedule_detail.schedule_id)
+ result_message = "Snapshot Schedule deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ sss = ElementSWSnapShotSchedule()
+ sss.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py
new file mode 100644
index 000000000..299338ad5
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_vlan.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_vlan
+
+short_description: NetApp Element Software Manage VLAN
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete, or modify a VLAN
+
+options:
+
+ state:
+ description:
+ - Whether the specified vlan should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ vlan_tag:
+ description:
+ - Virtual Network Tag
+ required: true
+ type: str
+
+ name:
+ description:
+    - User-defined name for the new VLAN
+    - The VLAN name must be unique
+    - Required for create
+ type: str
+
+ svip:
+ description:
+    - Storage virtual IP (SVIP), which must be unique
+ - Required for create
+ type: str
+
+ address_blocks:
+ description:
+ - List of address blocks for the VLAN
+ - Each address block contains the starting IP address and size for the block
+ - Required for create
+ type: list
+ elements: dict
+
+ netmask:
+ description:
+ - Netmask for the VLAN
+ - Required for create
+ type: str
+
+ gateway:
+ description:
+ - Gateway for the VLAN
+ type: str
+
+ namespace:
+ description:
+ - Enable or disable namespaces
+ type: bool
+
+ attributes:
+ description:
+ - Dictionary of attributes with name and value for each attribute
+ type: dict
+
+'''
+
+EXAMPLES = """
+- name: Create vlan
+ na_elementsw_vlan:
+ state: present
+ name: test
+ vlan_tag: 1
+    svip: "{{ ip_address }}"
+ netmask: "{{ netmask }}"
+ address_blocks:
+      - start: "{{ starting_ip_address }}"
+        size: 5
+      - start: "{{ starting_ip_address }}"
+ size: 5
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Delete vlan
+ na_elementsw_vlan:
+ state: absent
+ vlan_tag: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
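+
+# Illustrative sketch (hypothetical values): re-running with state=present and changed
+# fields updates an existing VLAN identified by vlan_tag.
+- name: Modify vlan
+  na_elementsw_vlan:
+    state: present
+    vlan_tag: 1
+    name: test_renamed
+    hostname: "{{ netapp_hostname }}"
+    username: "{{ netapp_username }}"
+    password: "{{ netapp_password }}"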
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVlan(object):
+ """ class to handle VLAN operations """
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and ElementSW connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ name=dict(required=False, type='str'),
+ vlan_tag=dict(required=True, type='str'),
+ svip=dict(required=False, type='str'),
+ netmask=dict(required=False, type='str'),
+ gateway=dict(required=False, type='str'),
+ namespace=dict(required=False, type='bool'),
+ attributes=dict(required=False, type='dict'),
+ address_blocks=dict(required=False, type='list', elements='dict')
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+
+ # add telemetry attributes
+ if self.parameters.get('attributes') is not None:
+ self.parameters['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan'))
+ else:
+ self.parameters['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan')
+
+ def validate_keys(self):
+ """
+ Validate if all required keys are present before creating
+ """
+ required_keys = ['address_blocks', 'svip', 'netmask', 'name']
+ if all(item in self.parameters.keys() for item in required_keys) is False:
+ self.module.fail_json(msg="One or more required fields %s for creating VLAN is missing"
+ % required_keys)
+ addr_blk_fields = ['start', 'size']
+ for address in self.parameters['address_blocks']:
+ if 'start' not in address or 'size' not in address:
+ self.module.fail_json(msg="One or more required fields %s for address blocks is missing"
+ % addr_blk_fields)
+
+ def create_network(self):
+ """
+ Add VLAN
+ """
+ try:
+ self.validate_keys()
+ create_params = self.parameters.copy()
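+            # strip connection and state parameters, which are not part of the create payload; vlan_tag is passed explicitly below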
+ for key in ['username', 'hostname', 'password', 'state', 'vlan_tag']:
+ del create_params[key]
+ self.elem.add_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **create_params)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error creating VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def delete_network(self):
+ """
+ Remove VLAN
+ """
+ try:
+ self.elem.remove_virtual_network(virtual_network_tag=self.parameters['vlan_tag'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error deleting VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def modify_network(self, modify):
+ """
+ Modify the VLAN
+ """
+ try:
+ self.elem.modify_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **modify)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error modifying VLAN %s"
+ % self.parameters['vlan_tag'],
+ exception=to_native(err))
+
+ def get_network_details(self):
+ """
+ Check existing VLANs
+ :return: vlan details if found, None otherwise
+ :type: dict
+ """
+ vlans = self.elem.list_virtual_networks(virtual_network_tag=self.parameters['vlan_tag'])
+ vlan_details = dict()
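+        # the list call above is filtered by virtual_network_tag, so at most one matching VLAN is expected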
+ for vlan in vlans.virtual_networks:
+ if vlan is not None:
+ vlan_details['name'] = vlan.name
+ vlan_details['address_blocks'] = list()
+ for address in vlan.address_blocks:
+ vlan_details['address_blocks'].append({
+ 'start': address.start,
+ 'size': address.size
+ })
+ vlan_details['svip'] = vlan.svip
+ vlan_details['gateway'] = vlan.gateway
+ vlan_details['netmask'] = vlan.netmask
+ vlan_details['namespace'] = vlan.namespace
+ vlan_details['attributes'] = vlan.attributes
+ return vlan_details
+ return None
+
+ def apply(self):
+ """
+ Call create / delete / modify vlan methods
+ """
+ network = self.get_network_details()
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(network, self.parameters)
+ modify = self.na_helper.get_modified_attributes(network, self.parameters)
+ if not self.module.check_mode:
+ if cd_action == "create":
+ self.create_network()
+ elif cd_action == "delete":
+ self.delete_network()
+ elif modify:
+ if 'attributes' in modify:
+ # new attributes will replace existing ones
+ modify['attributes'] = self.parameters['attributes']
+ self.modify_network(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply vlan actions """
+ network_obj = ElementSWVlan()
+ network_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py
new file mode 100644
index 000000000..3fcaf00ce
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element OS Software Volume Manager"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume
+
+short_description: NetApp Element Software Manage Volumes
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, destroy, or update volumes on ElementSW
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ name:
+ description:
+ - The name of the volume to manage.
+ - It accepts volume_name or volume_id
+ required: true
+ type: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ - It accepts Account_id or Account_name
+ required: true
+ type: str
+
+ enable512e:
+ description:
+ - Required when C(state=present)
+ - Should the volume provide 512-byte sector emulation?
+ type: bool
+ aliases:
+ - enable512emulation
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+ type: dict
+
+ qos_policy_name:
+ description:
+ - Quality of service policy for this volume.
+ - It can be a name or an id.
+ - Mutually exclusive with C(qos) option.
+ type: str
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+ type: dict
+
+ size:
+ description:
+ - The size of the volume in (size_unit).
+ - Required when C(state = present).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ access:
+ description:
+ - Access allowed for the volume.
+    - readOnly - Only read operations are allowed.
+    - readWrite - Reads and writes are allowed.
+    - locked - No reads or writes are allowed.
+    - replicationTarget - Identify a volume as the target volume for a paired set of volumes.
+ - If the volume is not paired, the access status is locked.
+ - If unspecified, the access settings of the clone will be the same as the source.
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+ type: str
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
+ - name: Update Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ na_elementsw_volume:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVolume(object):
+ """
+ Contains methods to parse arguments,
+ derive details of ElementSW objects
+ and send requests to ElementOS via
+ the ElementSW SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure SDK is installed
+ """
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True),
+ enable512e=dict(required=False, type='bool', aliases=['enable512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ qos_policy_name=dict(required=False, type='str', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None,
+ choices=['readOnly', 'readWrite', 'locked', 'replicationTarget']),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ mutually_exclusive=[
+ ('qos', 'qos_policy_name'),
+ ],
+ supports_check_mode=True
+ )
+
+ param = self.module.params
+
+ # set up state variables
+ self.state = param['state']
+ self.name = param['name']
+ self.account_id = param['account_id']
+ self.enable512e = param['enable512e']
+ self.qos = param['qos']
+ self.qos_policy_name = param['qos_policy_name']
+ self.attributes = param['attributes']
+ self.access = param['access']
+ self.size_unit = param['size_unit']
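+        # convert the requested size to bytes using the SF_BYTE_MAP unit multipliers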
+ if param['size'] is not None:
+ self.size = param['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the ElementSW Python SDK")
+ else:
+ try:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+ except solidfire.common.ApiServerError:
+ self.module.fail_json(msg="Unable to create the connection")
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+ return self.account_id
+
+ def get_qos_policy(self, name):
+ """
+ Get QOS Policy
+ """
+ policy, error = self.elementsw_helper.get_qos_policy(name)
+ if error is not None:
+ self.module.fail_json(msg=error)
+ return policy
+
+ def get_volume(self):
+ """
+ Return volume details if found
+ """
+ # Get volume details
+ volume_id = self.elementsw_helper.volume_exists(self.name, self.account_id)
+
+ if volume_id is not None:
+ # Return volume_details
+ volume_details = self.elementsw_helper.get_volume(volume_id)
+ if volume_details is not None:
+ return volume_details
+ return None
+
+ def create_volume(self, qos_policy_id):
+ """
+ Create Volume
+ :return: True if created, False if fails
+ """
+ options = dict(
+ name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ attributes=self.attributes
+ )
+ if qos_policy_id is not None:
+ options['qos_policy_id'] = qos_policy_id
+ if self.qos is not None:
+ options['qos'] = self.qos
+ try:
+ self.sfe.create_volume(**options)
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume: %s of size: %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self, volume_id):
+ """
+ Delete and purge the volume using volume id
+ :return: Success : True , Failed : False
+ """
+ try:
+ self.sfe.delete_volume(volume_id=volume_id)
+ self.sfe.purge_deleted_volume(volume_id=volume_id)
+ # Delete method will delete and also purge the volume instead of moving the volume state to inactive.
+
+ except Exception as err:
+            # Report the exact error message instead of a generic one
+ self.module.fail_json(msg='Error deleting volume: %s, %s' % (str(volume_id), to_native(err)),
+ exception=to_native(err))
+
+ def update_volume(self, volume_id, qos_policy_id):
+ """
+ Update the volume with the specified param
+ :return: Success : True, Failed : False
+ """
+ options = dict(
+ attributes=self.attributes
+ )
+ if self.access is not None:
+ options['access'] = self.access
+ if self.account_id is not None:
+ options['account_id'] = self.account_id
+ if self.qos is not None:
+ options['qos'] = self.qos
+ if qos_policy_id is not None:
+ options['qos_policy_id'] = qos_policy_id
+ if self.size is not None:
+ options['total_size'] = self.size
+ try:
+ self.sfe.modify_volume(volume_id, **options)
+ except Exception as err:
+            # Report the exact error message instead of a generic one
+ self.module.fail_json(msg='Error updating volume: %s, %s' % (str(volume_id), to_native(err)),
+ exception=to_native(err))
+
+ def apply(self):
+ # Perform pre-checks, call functions and exit
+ changed = False
+ qos_policy_id = None
+ action = None
+
+ self.get_account_id()
+ volume_detail = self.get_volume()
+
+ if self.state == 'present' and self.qos_policy_name is not None:
+ policy = self.get_qos_policy(self.qos_policy_name)
+ if policy is None:
+ error = 'Cannot find qos policy with name/id: %s' % self.qos_policy_name
+ self.module.fail_json(msg=error)
+ qos_policy_id = policy['qos_policy_id']
+
+ if volume_detail:
+ volume_id = volume_detail.volume_id
+ if self.state == 'absent':
+ action = 'delete'
+
+ elif self.state == 'present':
+ # Checking all the params for update operation
+ if self.access is not None and volume_detail.access != self.access:
+ action = 'update'
+
+ if self.account_id is not None and volume_detail.account_id != self.account_id:
+ action = 'update'
+
+ if qos_policy_id is not None and volume_detail.qos_policy_id != qos_policy_id:
+ # volume_detail.qos_policy_id may be None if no policy is associated with the volume
+ action = 'update'
+
+ if self.qos is not None and volume_detail.qos_policy_id is not None:
+ # remove qos_policy
+ action = 'update'
+
+ if self.qos is not None:
+ # Actual volume_detail.qos has ['burst_iops', 'burst_time', 'curve', 'max_iops', 'min_iops'] keys.
+                    # Only minIOPS, maxIOPS and burstIOPS matter here, so only these values are compared.
+ volume_qos = vars(volume_detail.qos)
+ if volume_qos['min_iops'] != self.qos['minIOPS'] or volume_qos['max_iops'] != self.qos['maxIOPS'] \
+ or volume_qos['burst_iops'] != self.qos['burstIOPS']:
+ action = 'update'
+
+ if self.size is not None and volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+                    # Resize only if the relative difference exceeds 0.1% (0.001)
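+                    # e.g. requesting 1 GiB against a volume of 1 GiB plus 512 KiB (about 0.05% larger) is treated as no change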
+ if size_difference / self.size > 0.001:
+ action = 'update'
+
+ if self.attributes is not None and volume_detail.attributes != self.attributes:
+ action = 'update'
+
+ elif self.state == 'present':
+ action = 'create'
+
+ result_message = ""
+
+ if action is not None:
+ changed = True
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if action == 'create':
+ self.create_volume(qos_policy_id)
+ result_message = "Volume created"
+ elif action == 'update':
+ self.update_volume(volume_id, qos_policy_id)
+ result_message = "Volume updated"
+ elif action == 'delete':
+ self.delete_volume(volume_id)
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ # Create object and call apply
+ na_elementsw_volume = ElementSWVolume()
+ na_elementsw_volume.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py
new file mode 100644
index 000000000..186ca85bc
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_clone.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Element Software volume clone"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume_clone
+
+short_description: NetApp Element Software Create Volume Clone
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create volume clones on Element OS
+
+options:
+
+ name:
+ description:
+ - The name of the clone.
+ required: true
+ type: str
+
+ src_volume_id:
+ description:
+ - The id of the src volume to clone. id may be a numeric identifier or a volume name.
+ required: true
+ type: str
+
+ src_snapshot_id:
+ description:
+ - The id of the snapshot to clone. id may be a numeric identifier or a snapshot name.
+ type: str
+
+ account_id:
+ description:
+ - Account ID for the owner of this cloned volume. id may be a numeric identifier or an account name.
+ required: true
+ type: str
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this cloned volume.
+ type: dict
+
+ size:
+ description:
+ - The size of the cloned volume in (size_unit).
+ type: int
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ type: str
+
+ access:
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+ description:
+ - Access allowed for the volume.
+ - If unspecified, the access settings of the clone will be the same as the source.
+ - readOnly - Only read operations are allowed.
+ - readWrite - Reads and writes are allowed.
+ - locked - No reads or writes are allowed.
+ - replicationTarget - Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Clone Volume
+ na_elementsw_volume_clone:
+ hostname: "{{ elementsw_hostname }}"
+ username: "{{ elementsw_username }}"
+ password: "{{ elementsw_password }}"
+ name: CloneAnsibleVol
+ src_volume_id: 123
+ src_snapshot_id: 41
+ account_id: 3
+ size: 1
+ size_unit: gb
+ access: readWrite
+ attributes: {"virtual_network_id": 12345}
+
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class ElementOSVolumeClone(object):
+ """
+ Contains methods to parse arguments,
+ derive details of Element Software objects
+ and send requests to Element OS via
+ the Solidfire SDK
+ """
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+        check parameters and ensure SDK is installed
+ """
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True),
+ src_volume_id=dict(required=True),
+ src_snapshot_id=dict(),
+ account_id=dict(required=True),
+ attributes=dict(type='dict', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ access=dict(type='str',
+ default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ parameters = self.module.params
+
+ # set up state variables
+ self.name = parameters['name']
+ self.src_volume_id = parameters['src_volume_id']
+ self.src_snapshot_id = parameters['src_snapshot_id']
+ self.account_id = parameters['account_id']
+ self.attributes = parameters['attributes']
+
+ self.size_unit = parameters['size_unit']
+ if parameters['size'] is not None:
+ self.size = parameters['size'] * \
+ self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = parameters['access']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(
+ msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.sfe)
+
+ # add telemetry attributes
+ if self.attributes is not None:
+ self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone'))
+ else:
+ self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone')
+
+ def get_account_id(self):
+ """
+ Return account id if found
+ """
+ try:
+ # Update and return self.account_id
+ self.account_id = self.elementsw_helper.account_exists(self.account_id)
+ return self.account_id
+ except Exception as err:
+ self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
+
+ def get_snapshot_id(self):
+ """
+ Return snapshot details if found
+ """
+ src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
+ # Update and return self.src_snapshot_id
+ if src_snapshot is not None:
+ self.src_snapshot_id = src_snapshot.snapshot_id
+ # Return src_snapshot
+ return self.src_snapshot_id
+ return None
+
+ def get_src_volume_id(self):
+ """
+ Return volume id if found
+ """
+ src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
+ if src_vol_id is not None:
+ # Update and return self.volume_id
+ self.src_volume_id = src_vol_id
+ # Return src_volume_id
+ return self.src_volume_id
+ return None
+
+ def clone_volume(self):
+ """Clone Volume from source"""
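+        # options left as None (size, access, snapshot_id) are passed through unchanged; per the access option docs the clone then keeps the source's access setting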
+ try:
+ self.sfe.clone_volume(volume_id=self.src_volume_id,
+ name=self.name,
+ new_account_id=self.account_id,
+ new_size=self.size,
+ access=self.access,
+ snapshot_id=self.src_snapshot_id,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error creating clone %s of size %s" % (self.name, self.size), exception=to_native(err))
+
+ def apply(self):
+ """Perform pre-checks, call functions and exit"""
+ changed = False
+ result_message = ""
+
+ if self.get_account_id() is None:
+ self.module.fail_json(msg="Account id not found: %s" % (self.account_id))
+
+ # there is only one state. other operations
+ # are part of the volume module
+
+ # ensure that a volume with the clone name
+ # isn't already present
+ if self.elementsw_helper.volume_exists(self.name, self.account_id) is None:
+ # check for the source volume
+ if self.get_src_volume_id() is not None:
+ # check for a valid snapshot
+ if self.src_snapshot_id and not self.get_snapshot_id():
+ self.module.fail_json(msg="Snapshot id not found: %s" % (self.src_snapshot_id))
+ # change required
+ changed = True
+ else:
+ self.module.fail_json(msg="Volume id not found %s" % (self.src_volume_id))
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ self.clone_volume()
+ result_message = "Volume cloned"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ """Create object and call apply"""
+ volume_clone = ElementOSVolumeClone()
+ volume_clone.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py
new file mode 100644
index 000000000..0d5b38a0d
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/plugins/modules/na_elementsw_volume_pair.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+DOCUMENTATION = '''
+
+module: na_elementsw_volume_pair
+
+short_description: NetApp Element Software Volume Pair
+extends_documentation_fragment:
+ - netapp.elementsw.netapp.solidfire
+version_added: 2.7.0
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+description:
+- Create, delete volume pair
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume pair should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+ src_volume:
+ description:
+ - Source volume name or volume ID
+ required: true
+ type: str
+
+ src_account:
+ description:
+ - Source account name or ID
+ required: true
+ type: str
+
+ dest_volume:
+ description:
+ - Destination volume name or volume ID
+ required: true
+ type: str
+
+ dest_account:
+ description:
+ - Destination account name or ID
+ required: true
+ type: str
+
+ mode:
+ description:
+ - Mode to start the volume pairing
+ choices: ['async', 'sync', 'snapshotsonly']
+ default: async
+ type: str
+
+ dest_mvip:
+ description:
+ - Destination IP address of the paired cluster.
+ required: true
+ type: str
+
+ dest_username:
+ description:
+ - Destination username for the paired cluster
+    - Optional if this is the same as the source cluster username.
+ type: str
+
+ dest_password:
+ description:
+ - Destination password for the paired cluster
+    - Optional if this is the same as the source cluster password.
+ type: str
+
+'''
+
+EXAMPLES = """
+ - name: Create volume pair
+ na_elementsw_volume_pair:
+ hostname: "{{ src_cluster_hostname }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ state: present
+ src_volume: test1
+ src_account: test2
+ dest_volume: test3
+ dest_account: test4
+ mode: sync
+ dest_mvip: "{{ dest_cluster_hostname }}"
+
+ - name: Delete volume pair
+ na_elementsw_volume_pair:
+ hostname: "{{ src_cluster_hostname }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ state: absent
+ src_volume: 3
+ src_account: 1
+ dest_volume: 2
+ dest_account: 1
+ dest_mvip: "{{ dest_cluster_hostname }}"
+ dest_username: "{{ dest_cluster_username }}"
+ dest_password: "{{ dest_cluster_password }}"
+
+"""
+
+RETURN = """
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module import NaElementSWModule
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+try:
+ import solidfire.common
+except ImportError:
+ HAS_SF_SDK = False
+
+
+class ElementSWVolumePair(object):
+ ''' class to handle volume pairing operations '''
+
+ def __init__(self):
+ """
+ Setup Ansible parameters and SolidFire connection
+ """
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'],
+ default='present'),
+ src_volume=dict(required=True, type='str'),
+ src_account=dict(required=True, type='str'),
+ dest_volume=dict(required=True, type='str'),
+ dest_account=dict(required=True, type='str'),
+ mode=dict(required=False, type='str',
+ choices=['async', 'sync', 'snapshotsonly'],
+ default='async'),
+ dest_mvip=dict(required=True, type='str'),
+ dest_username=dict(required=False, type='str'),
+ dest_password=dict(required=False, type='str', no_log=True)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.elem = netapp_utils.create_sf_connection(module=self.module)
+
+ self.elementsw_helper = NaElementSWModule(self.elem)
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # get element_sw_connection for destination cluster
+ # overwrite existing source host, user and password with destination credentials
+ self.module.params['hostname'] = self.parameters['dest_mvip']
+        # the source username and password are reused
+        # if dest_username and dest_password aren't specified
+ if self.parameters.get('dest_username'):
+ self.module.params['username'] = self.parameters['dest_username']
+ if self.parameters.get('dest_password'):
+ self.module.params['password'] = self.parameters['dest_password']
+ self.dest_elem = netapp_utils.create_sf_connection(module=self.module)
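+        # note: self.module.params now carries the destination credentials; the source connection (self.elem) was created earlier with the original values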
+ self.dest_elementsw_helper = NaElementSWModule(self.dest_elem)
+
+ def check_if_already_paired(self, vol_id):
+ """
+ Check for idempotency
+ A volume can have only one pair
+ Return paired-volume-id if volume is paired already
+ None if volume is not paired
+ """
+ paired_volumes = self.elem.list_volumes(volume_ids=[vol_id],
+ is_paired=True)
+ for vol in paired_volumes.volumes:
+ for pair in vol.volume_pairs:
+ if pair is not None:
+ return pair.remote_volume_id
+ return None
+
+ def pair_volumes(self):
+ """
+ Start volume pairing on source, and complete on target volume
+ """
+ try:
+ pair_key = self.elem.start_volume_pairing(
+ volume_id=self.parameters['src_vol_id'],
+ mode=self.parameters['mode'])
+ self.dest_elem.complete_volume_pairing(
+ volume_pairing_key=pair_key.volume_pairing_key,
+ volume_id=self.parameters['dest_vol_id'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error pairing volume id %s"
+ % (self.parameters['src_vol_id']),
+ exception=to_native(err))
+
+ def pairing_exists(self, src_id, dest_id):
+ src_paired = self.check_if_already_paired(self.parameters['src_vol_id'])
+ dest_paired = self.check_if_already_paired(self.parameters['dest_vol_id'])
+ if src_paired is not None or dest_paired is not None:
+ return True
+ return None
+
+ def unpair_volumes(self):
+ """
+ Delete volume pair
+ """
+ try:
+ self.elem.remove_volume_pair(volume_id=self.parameters['src_vol_id'])
+ self.dest_elem.remove_volume_pair(volume_id=self.parameters['dest_vol_id'])
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error unpairing volume ids %s and %s"
+ % (self.parameters['src_vol_id'],
+ self.parameters['dest_vol_id']),
+ exception=to_native(err))
+
+ def get_account_id(self, account, type):
+ """
+ Get source and destination account IDs
+ """
+ try:
+ if type == 'src':
+ self.parameters['src_account_id'] = self.elementsw_helper.account_exists(account)
+ elif type == 'dest':
+ self.parameters['dest_account_id'] = self.dest_elementsw_helper.account_exists(account)
+ except solidfire.common.ApiServerError as err:
+ self.module.fail_json(msg="Error: either account %s or %s does not exist"
+ % (self.parameters['src_account'],
+ self.parameters['dest_account']),
+ exception=to_native(err))
+
+ def get_volume_id(self, volume, type):
+ """
+ Get source and destination volume IDs
+ """
+ if type == 'src':
+ self.parameters['src_vol_id'] = self.elementsw_helper.volume_exists(volume, self.parameters['src_account_id'])
+ if self.parameters['src_vol_id'] is None:
+ self.module.fail_json(msg="Error: source volume %s does not exist"
+ % (self.parameters['src_volume']))
+ elif type == 'dest':
+ self.parameters['dest_vol_id'] = self.dest_elementsw_helper.volume_exists(volume, self.parameters['dest_account_id'])
+ if self.parameters['dest_vol_id'] is None:
+ self.module.fail_json(msg="Error: destination volume %s does not exist"
+ % (self.parameters['dest_volume']))
+
+ def get_ids(self):
+ """
+ Get IDs for volumes and accounts
+ """
+ self.get_account_id(self.parameters['src_account'], 'src')
+ self.get_account_id(self.parameters['dest_account'], 'dest')
+ self.get_volume_id(self.parameters['src_volume'], 'src')
+ self.get_volume_id(self.parameters['dest_volume'], 'dest')
+
+ def apply(self):
+ """
+ Call create / delete volume pair methods
+ """
+ self.get_ids()
+ paired = self.pairing_exists(self.parameters['src_vol_id'],
+ self.parameters['dest_vol_id'])
+ # calling helper to determine action
+ cd_action = self.na_helper.get_cd_action(paired, self.parameters)
+        if not self.module.check_mode:
+            if cd_action == "create":
+                self.pair_volumes()
+            elif cd_action == "delete":
+                self.unpair_volumes()
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+ """ Apply volume pair actions """
+ vol_obj = ElementSWVolumePair()
+ vol_obj.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/elementsw/requirements.txt b/ansible_collections/netapp/elementsw/requirements.txt
new file mode 100644
index 000000000..2054956e3
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/requirements.txt
@@ -0,0 +1 @@
+solidfire-sdk-python \ No newline at end of file
diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py b/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/compat/__init__.py
diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py b/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py
new file mode 100644
index 000000000..f60ee6782
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py b/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py
new file mode 100644
index 000000000..0972cd2e8
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't have a newline to end the last line, so remove the
+ # newline that our naive format() added
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines` methods of the
+ file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
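+            # yield an explicitly configured return_value forever, otherwise fall back to the prepared data lines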
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
diff --git a/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py b/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py
new file mode 100644
index 000000000..73a20cf8c
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/compat/unittest.py
@@ -0,0 +1,44 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+import pytest
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+
+ class TestCase:
+ """ skip everything """
+ pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available')
+else:
+ from unittest import *
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py
new file mode 100644
index 000000000..0bd1e2550
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group.py
@@ -0,0 +1,175 @@
+''' unit test for Ansible module: na_elementsw_access_group.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group \
+ import ElementSWAccessGroup as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_access_group'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
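+ # replace the instance dict wholesale so each keyword becomes an attribute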
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+ def list_volume_access_groups(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build access_group list: access_groups.name, access_groups.account_id '''
+ access_groups = list()
+ access_group_list = self.Bunch(volume_access_groups=access_groups)
+ return access_group_list
+
+ def create_volume_access_group(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
+ if self.force_error and 'account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ print('account_id', account_id)
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating volume access group element_groupname: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'name': 'element_groupname',
+ 'account_id': 'element_account_id',
+ 'volumes': ['volume1'],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['account_id'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id'
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
new file mode 100644
index 000000000..fb78ad78a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_access_group_volumes.py
@@ -0,0 +1,245 @@
+''' unit test for Ansible module: na_elementsw_access_group_volumes.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_access_group_volumes \
+ import ElementSWAccessGroupVolumes as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+MODIFY_ERROR = 'some_error_in_modify_access_group'
+
+VOLUME_ID = 777
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, volume_id=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.volume_id = volume_id
+
+ def list_volume_access_groups(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build access_group list: access_group.name, access_group.volume_access_group_id, access_group.volumes '''
+ group_name = 'element_groupname'
+ if self.volume_id is None:
+ volume_list = list()
+ else:
+ volume_list = [self.volume_id]
+ access_group = self.Bunch(name=group_name, volume_access_group_id=888, volumes=volume_list)
+ access_groups = [access_group]
+ access_group_list = self.Bunch(volume_access_groups=access_groups)
+ return access_group_list
+
+ def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume list: volume.name, volume.id '''
+ volume = self.Bunch(name='element_volumename', volume_id=VOLUME_ID, delete_time='')
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def modify_volume_access_group(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify_exception' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(MODIFY_ERROR)
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
+ if self.force_error and 'get_account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ print('account_id', account_id)
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'access_group': 'element_groupname',
+ 'volumes': 'element_volumename',
+ 'account_id': 'element_account_id',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume(self, mock_create_sf_connection):
+ ''' adding a volume '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume_idempotent(self, mock_create_sf_connection):
+ ''' adding a volume that is already in the access group '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_volume(self, mock_create_sf_connection):
+ ''' removing a volume that is in the access group '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(volume_id=VOLUME_ID)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_volume_idempotent(self, mock_create_sf_connection):
+ ''' removing a volume that is not in the access group '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_modify_exception(self, mock_create_sf_connection):
+ ''' modify does not return anything but can raise an exception '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating volume access group element_groupname: %s' % MODIFY_ERROR
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_volume_name(self, mock_create_sf_connection):
+ ''' report error if volume does not exist '''
+ args = dict(self.ARGS)
+ args['volumes'] = ['volume1']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified volume %s does not exist' % 'volume1'
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_group_name(self, mock_create_sf_connection):
+ ''' report error if access group does not exist '''
+ args = dict(self.ARGS)
+ args['access_group'] = 'something_else'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified access group "%s" does not exist for account id: %s.' % ('something_else', 'element_account_id')
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_invalid_account_id(self, mock_create_sf_connection):
+ ''' report error if account id is not found '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_account_id')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error: Specified account id "%s" does not exist.' % 'element_account_id'
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py
new file mode 100644
index 000000000..8075ba5c4
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_account.py
@@ -0,0 +1,137 @@
+''' unit test for Ansible module: na_elementsw_account.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \
+ import ElementSWAccount as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ accounts = list()
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
+
+ def add_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating account element_username: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py
new file mode 100644
index 000000000..6624f374d
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster.py
@@ -0,0 +1,228 @@
+''' unit test for Ansible module: na_elementsw_cluster.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster \
+ import ElementSWCluster as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def __init__(self, force_error=False, where=None, nodes=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.nodes = nodes
+ self._port = 442
+ self.called = list()
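+ # names of the SDK methods that were invoked, so tests can assert on call patterns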
+
+ def record(self, args, kwargs):
+ name = inspect.stack()[1][3] # caller function name
+ print('%s: args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
+ def create_cluster(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+
+ def send_request(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+
+ def get_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+ if self.force_error and self.where == 'get_config_exception':
+ raise ConnectionError
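+ # fabricate ensemble entries of the form 'index:node_id' for the requested nodes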
+ if self.nodes is not None:
+ nodes = ['%d:%s' % (i, node) for i, node in enumerate(self.nodes)]
+ else:
+ nodes = list()
+ cluster = self.Bunch(ensemble=nodes, cluster='cl_name')
+ config = self.Bunch(cluster=cluster)
+ return self.Bunch(config=config)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ # 'state': 'present',
+ 'management_virtual_ip': '10.10.10.10',
+ 'storage_virtual_ip': '10.10.10.11',
+ 'nodes': [NODE_ID1, NODE_ID2],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create(self, mock_create_sf_connection):
+ ''' create cluster basic '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ msg = 'created'
+ assert msg in exc.value.args[0]['msg']
+ assert 'create_cluster' in my_obj.sfe_node.called
+ assert 'send_request' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_extra_parms(self, mock_create_sf_connection):
+ ''' force a direct call to send_request '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['order_number'] = '12345'
+ args['serial_number'] = '54321'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where='get_config_exception')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'send_request' in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent(self, mock_create_sf_connection):
+ ''' cluster already exists with same nodes '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_extra_nodes(self, mock_create_sf_connection):
+ ''' cluster already exists with more nodes '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: found existing cluster with more nodes in ensemble.'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_extra_nodes_ok(self, mock_create_sf_connection):
+ ''' cluster already exists with more nodes but we're OK with a superset '''
+ args = dict(self.ARGS)
+ args['fail_if_cluster_already_exists_with_larger_ensemble'] = False
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1, NODE_ID2, NODE_ID3])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ msg = 'cluster already exists'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_idempotent_missing_nodes(self, mock_create_sf_connection):
+ ''' cluster already exists with fewer nodes.
+ Since not every node is listed in the ensemble, we can't tell if it's an error or not '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(nodes=[NODE_ID1])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ msg = 'cluster already exists'
+ assert msg in exc.value.args[0]['msg']
+ assert 'send_request' not in my_obj.sfe_node.called
+ assert 'create_cluster' not in my_obj.sfe_node.called
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py
new file mode 100644
index 000000000..79f461ccc
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_config.py
@@ -0,0 +1,157 @@
+''' unit test for Ansible module: na_elementsw_cluster_config.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster_config \
+ import ElementSWClusterConfig as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+GET_ERROR = 'some_error_in_get_ntp_info'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.253.168.129',
+ 'username': 'namburu',
+ 'password': 'SFlab1234',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_setup_ntp_info_called(self, mock_create_sf_connection):
+ ''' test if setup_ntp_info is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ ntp_dict = {'set_ntp_info': {'broadcastclient': None,
+ 'ntp_servers': ['1.1.1.1']}}
+ module_args.update(ntp_dict)
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_setup_ntp_info: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_set_encryption_at_rest_called(self, mock_create_sf_connection):
+ ''' test if set_encryption_at_rest is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'encryption_at_rest': 'present'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_encryption_at_rest enable: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+ module_args.update({'encryption_at_rest': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_encryption_at_rest disable: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_enable_feature_called(self, mock_create_sf_connection):
+ ''' test if enable_feature for vvols is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'enable_virtual_volumes': True})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_enable_feature: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_set_cluster_full_threshold_called(self, mock_create_sf_connection):
+ ''' test if set_cluster_full_threshold is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ cluster_mod_dict = \
+ {'modify_cluster_full_threshold': {'stage2_aware_threshold': 2,
+ 'stage3_block_threshold_percent': 2,
+ 'max_metadata_over_provision_factor': 2}}
+ module_args.update(cluster_mod_dict)
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_set_cluster_full_threshold: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py
new file mode 100644
index 000000000..9236daa04
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_cluster_snmp.py
@@ -0,0 +1,176 @@
+''' unit test for Ansible module: na_elementsw_cluster_snmp.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_cluster_snmp \
+ import ElementSWClusterSnmp as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+GET_ERROR = 'some_error_in_get_snmp_info'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.117.78.131',
+ 'username': 'admin',
+ 'password': 'netapp1!',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_enable_snmp_called(self, mock_create_sf_connection):
+ ''' test if enable_snmp is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': True,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_if_enable_snmp_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_configure_snmp_from_version_3_TO_version_2_called(self, mock_create_sf_connection):
+ ''' test reconfiguring SNMP from version 3 to version 2 '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': False,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_configure_snmp_from_version_3_TO_version_2_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_configure_snmp_from_version_2_TO_version_3_called(self, mock_create_sf_connection):
+ ''' test reconfiguring SNMP from version 2 to version 3 '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'snmp_v3_enabled': True,
+ 'state': 'present'})
+ module_args.update({'usm_users': {'access': 'rouser',
+ 'name': 'TestUser_sample',
+ 'password': 'ChangeMe@123',
+ 'passphrase': 'ChangeMe@123',
+ 'secLevel': 'auth', }})
+
+ module_args.update({'networks': {'access': 'ro',
+ 'cidr': 24,
+ 'community': 'TestNetwork',
+ 'network': '192.168.0.1', }})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_ensure_configure_snmp_from_version_2_TO_version_3_called: %s' % repr(exc.value))
+ assert exc.value
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_disable_snmp_called(self, mock_create_sf_connection):
+ ''' test if disable_snmp is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ module_args.update({'state': 'absent'})
+ set_module_args(module_args)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_if_disable_snmp_called: %s' % repr(exc.value))
+ assert exc.value
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py
new file mode 100644
index 000000000..dc8fd5e23
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_info.py
@@ -0,0 +1,344 @@
+''' unit tests for Ansible module: na_elementsw_info.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_info \
+ import ElementSWInfo as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def to_json(self):
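+ # round-trip through JSON to turn this Bunch (and any nested Bunch) into plain dicts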
+ return json.loads(json.dumps(self, default=lambda x: x.__dict__))
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.nodes = [NODE_ID1, NODE_ID2, NODE_ID3]
+ self._port = 442
+ self.called = list()
+ if force_error and where == 'cx':
+ raise netapp_utils.solidfire.common.ApiConnectionError('testme')
+
+ def record(self, args, kwargs):
+ name = inspect.stack()[1][3] # caller function name
+ print('%s: args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ self.record(repr(args), repr(kwargs))
+ accounts = list()
+ accounts.append({'username': 'user1'})
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
+
+ def list_all_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build all_node list: all_node.name, all_node.all_node_id '''
+ self.record(repr(args), repr(kwargs))
+ all_nodes = list()
+ all_nodes.append({'id': 123})
+ all_node_list = self.Bunch(all_nodes=all_nodes)
+ return all_node_list
+
+ def list_drives(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build drive list: drive.name, drive.drive_id '''
+ self.record(repr(args), repr(kwargs))
+ drives = list()
+ drives.append({'id': 123})
+ drive_list = self.Bunch(drives=drives)
+ return drive_list
+
+ def get_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+ if self.force_error and self.where == 'get_config_exception':
+ raise ConnectionError
+ if self.nodes is not None:
+ nodes = ['%d:%s' % (i, node) for i, node in enumerate(self.nodes)]
+ else:
+ nodes = list()
+ cluster = self.Bunch(ensemble=nodes, cluster='cl_name')
+ config = self.Bunch(cluster=cluster)
+ return self.Bunch(config=config)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ # 'state': 'present',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_default(self, mock_create_sf_connection):
+ ''' gather all by default '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'cluster_accounts' in exc.value.args[0]['info']
+ assert 'node_config' in exc.value.args[0]['info']
+ username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username']
+ assert username == 'user1'
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_all(self, mock_create_sf_connection):
+ ''' gather all explicitly '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_clusters(self, mock_create_sf_connection):
+ ''' gather all cluster scoped subsets '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_clusters']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'cluster_accounts' in exc.value.args[0]['info']
+ accounts = exc.value.args[0]['info']['cluster_accounts']
+ print('accounts: >>%s<<' % accounts, type(accounts))
+ print(my_obj.sfe_node.called)
+ assert 'list_accounts' in my_obj.sfe_node.called
+ assert 'get_config' not in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_nodes(self, mock_create_sf_connection):
+ ''' gather all node scoped subsets '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_nodes']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert 'node_config' in exc.value.args[0]['info']
+ config = exc.value.args[0]['info']['node_config']
+ print('config: >>%s<<' % config, type(config))
+ print(my_obj.sfe_node.called)
+ assert 'list_accounts' not in my_obj.sfe_node.called
+ assert 'get_config' in my_obj.sfe_node.called
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_all_nodes_not_alone(self, mock_create_sf_connection):
+ ''' gather all node scoped subsets but fail as another subset is present '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['all_nodes', 'dummy']
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'no other subset is allowed'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_success(self, mock_create_sf_connection):
+ ''' filter on key, value - successful match '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(username='user1')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ username = exc.value.args[0]['info']['cluster_accounts']['accounts'][0]['username']
+ assert username == 'user1'
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_bad_key(self, mock_create_sf_connection):
+ ''' filter on key, value - key not found '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(bad_key='user1')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: key bad_key not found in'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_bad_key_ignored(self, mock_create_sf_connection):
+ ''' filter on key, value - key not found - ignore error '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(bad_key='user1')
+ args['fail_on_key_not_found'] = False
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == list()
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_record_not_found(self, mock_create_sf_connection):
+ ''' filter on key, value - no match '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(username='user111')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['info']['cluster_accounts']['accounts'] == list()
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_info_filter_record_not_found_error(self, mock_create_sf_connection):
+ ''' filter on key, value - no match - force error on empty '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['gather_subsets'] = ['cluster_accounts']
+ args['filter'] = dict(username='user111')
+ args['fail_on_record_not_found'] = True
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error: no match for'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_connection_error(self, mock_create_sf_connection):
+ ''' force an ApiConnectionError while creating the connection '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # force a connection exception
+ mock_create_sf_connection.side_effect = netapp_utils.solidfire.common.ApiConnectionError('testme')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print(exc.value.args[0])
+ msg = 'Failed to create connection for hostname:442'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_other_connection_error(self, mock_create_sf_connection):
+ ''' force a generic exception (KeyError) while creating the connection '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # force a connection exception
+ mock_create_sf_connection.side_effect = KeyError('testme')
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ print(exc.value.args[0])
+ msg = 'Failed to connect for hostname:442'
+ assert msg in exc.value.args[0]['msg']
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py
new file mode 100644
index 000000000..ee5ff85db
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_initiators.py
@@ -0,0 +1,201 @@
+''' unit test for Ansible module: na_elementsw_initiators.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_initiators \
+ import ElementSWInitiators as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ class Initiator(object):
+ def __init__(self, entries):
+ self.__dict__.update(entries)
+
+ def list_initiators(self):
+ ''' build a mock initiator list '''
+ initiator = self.Bunch(
+ initiator_name="a",
+ initiator_id=13,
+ alias="a2",
+ # Note: 'config-mgmt' and 'event-source' are added for telemetry
+ attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_initiators'},
+ volume_access_groups=[1]
+ )
+ initiators = self.Bunch(
+ initiators=[initiator]
+ )
+ return initiators
+
+ def create_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
+
+ def delete_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
+
+ def modify_initiators(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' mock method '''
+ pass
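+ # the create/delete/modify mocks intentionally do nothing; the tests only check
+ # the 'changed' flag the module computes from list_initiators()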
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'hostname': '10.253.168.129',
+ 'username': 'namburu',
+ 'password': 'SFlab1234',
+ })
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_module_fail_when_required_args_missing(self, mock_create_sf_connection):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_initiator(self, mock_create_sf_connection):
+ ''' test if create initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "newinitiator1",
+ "alias": "newinitiator1alias",
+ "attributes": {"key1": "value1"}
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_initiator(self, mock_create_sf_connection):
+ ''' test if delete initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "absent",
+ "initiators": [{
+ "name": "a"
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_initiator(self, mock_create_sf_connection):
+ ''' test if modify initiator is called '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "a",
+ "alias": "a3",
+ "attributes": {"key": "value"}
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_modify_initiators: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_initiator_idempotent(self, mock_create_sf_connection):
+ ''' test that no change is reported when the initiator already matches '''
+ module_args = {}
+ module_args.update(self.set_default_args())
+ initiator_dict = {
+ "state": "present",
+ "initiators": [{
+ "name": "a",
+ "alias": "a2",
+ "attributes": {"key": "value"},
+ "volume_access_group_id": 1
+ }]
+ }
+ module_args.update(initiator_dict)
+ set_module_args(module_args)
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_modify_initiators: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py
new file mode 100644
index 000000000..5364a4e76
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_network_interfaces.py
@@ -0,0 +1,293 @@
+''' unit tests for Ansible module: na_elementsw_network_interfaces.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import inspect
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_network_interfaces \
+ import ElementSWNetworkInterfaces as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_ID3 = 999
+
+MAPPING = dict(
+ bond_mode='bond-mode',
+ bond_lacp_rate='bond-lacp_rate',
+ dns_nameservers='dns-nameservers',
+ dns_search='dns-search',
+ virtual_network_tag='virtualNetworkTag',
+)
+
+
+def mapkey(key):
+ if key in MAPPING:
+ return MAPPING[key]
+ return key
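+# example: mapkey('dns_nameservers') -> 'dns-nameservers';
+# keys not listed in MAPPING are returned unchanged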
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __repr__(self):
+ results = dict()
+ for key, value in vars(self).items():
+ results[key] = repr(value)
+ return repr(results)
+
+ def to_json(self):
+ return json.loads(json.dumps(self, default=lambda x: x.__dict__))
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ # self._port = 442
+ self.called = list()
+ self.set_network_config_args = dict()
+ if force_error and where == 'cx':
+ raise netapp_utils.solidfire.common.ApiConnectionError('testme')
+
+ def record(self, args, kwargs): # pylint: disable=unused-argument
+ name = inspect.stack()[1][3] # caller function name
+ # print('%s: , args: %s, kwargs: %s' % (name, args, kwargs))
+ self.called.append(name)
+
+ def set_network_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ self.record(repr(args), repr(kwargs))
+ print('network:', kwargs['network'].to_json())
+ self.set_network_config_args = kwargs['network'].to_json()
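+ # tests later inspect this captured payload, e.g.
+ # my_obj.sfe.set_network_config_args['Bond1G']['bond-mode']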
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ DEPRECATED_ARGS = {
+ 'ip_address_1g': 'ip_address_1g',
+ 'subnet_1g': 'subnet_1g',
+ 'gateway_address_1g': 'gateway_address_1g',
+ 'mtu_1g': 'mtu_1g', # make sure to use a value different from the default
+ 'bond_mode_1g': 'ALB', # make sure to use a value different from the default
+ 'lacp_1g': 'Fast', # make sure to use a value different from the default
+ 'ip_address_10g': 'ip_address_10g',
+ 'subnet_10g': 'subnet_10g',
+ 'gateway_address_10g': 'gateway_address_10g',
+ 'mtu_10g': 'mtu_10g', # make sure to use a value different from the default
+ 'bond_mode_10g': 'LACP', # make sure to use a value different from the default
+ 'lacp_10g': 'Fast', # make sure to use a value different from the default
+ 'method': 'static',
+ 'dns_nameservers': 'dns_nameservers',
+ 'dns_search_domains': 'dns_search_domains',
+ 'virtual_network_tag': 'virtual_network_tag',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ ARGS = {
+ 'bond_1g': {
+ 'address': '10.10.10.10',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'mtu': '1500',
+ 'bond_mode': 'ActivePassive',
+ 'dns_nameservers': ['dns_nameservers'],
+ 'dns_search': ['dns_search_domains'],
+ 'virtual_network_tag': 'virtual_network_tag',
+ },
+ 'bond_10g': {
+ 'bond_mode': 'LACP',
+ 'bond_lacp_rate': 'Fast',
+ },
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
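+ # ARGS exercises the current bond_1g/bond_10g dictionary options;
+ # DEPRECATED_ARGS above exercises the retired flat options, which the module rejects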
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def test_deprecated_nothing(self):
+ ''' deprecated without 1g or 10g options '''
+ args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '1g' in key or '10g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g or bond_10g options to configure the bond interfaces.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_all(self):
+ ''' deprecated with all options '''
+ args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g and bond_10g options to configure the bond interfaces.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_1g_only(self):
+ ''' deprecated with 1g options only '''
+ args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '10g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_1g option to configure the bond 1G interface.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ def test_deprecated_10g_only(self):
+ ''' deprecated with 10g options only '''
+ args = dict(self.DEPRECATED_ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '1g' in key:
+ del args[key]
+ set_module_args(args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_module()
+ msg = 'Please use the new bond_10g option to configure the bond 10G interface.'
+ assert msg in exc.value.args[0]['msg']
+ msg = 'This module cannot set or change "method"'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_nothing(self, mock_create_sf_connection):
+ ''' modify without 1g or 10g options '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '1g' in key or '10g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ print('LN:', my_obj.module.params)
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ assert len(my_obj.sfe.set_network_config_args) == 0
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_all(self, mock_create_sf_connection):
+ ''' modify with all options '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' in my_obj.sfe.set_network_config_args
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_1g_only(self, mock_create_sf_connection):
+ ''' modify with 1g options only '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '10g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' in my_obj.sfe.set_network_config_args
+ assert 'Bond10G' not in my_obj.sfe.set_network_config_args
+ print(my_obj.sfe.set_network_config_args['Bond1G'])
+ for key in args['bond_1g']:
+ if key != 'bond_lacp_rate':
+ assert my_obj.sfe.set_network_config_args['Bond1G'][mapkey(key)] == args['bond_1g'][key]
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_10g_only(self, mock_create_sf_connection):
+ ''' modify with 10g options only '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ for key in list(args):
+ if '1g' in key:
+ del args[key]
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ assert 'Bond1G' not in my_obj.sfe.set_network_config_args
+ assert 'Bond10G' in my_obj.sfe.set_network_config_args
+ assert my_obj.sfe.set_network_config_args['Bond10G']['bond-lacp_rate'] == args['bond_10g']['bond_lacp_rate']
+ for key in args['bond_10g']:
+ assert my_obj.sfe.set_network_config_args['Bond10G'][mapkey(key)] == args['bond_10g'][key]
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py
new file mode 100644
index 000000000..3e163d000
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_nodes.py
@@ -0,0 +1,324 @@
+''' unit test for Ansible module: na_elementsw_node.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_node \
+ import ElementSWNode as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+MODIFY_ERROR = 'some_error_in_modify_access_group'
+
+NODE_ID1 = 777
+NODE_ID2 = 888
+NODE_NAME1 = 'node_name1'
+NODE_NAME2 = 'node_name2'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, node_id=None, cluster_name='', node_state='Pending'):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.node_id = node_id
+ self.cluster_name = cluster_name
+ self.node_state = node_state
+
+ def list_all_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build node lists: nodes, pending_nodes, active_pending_nodes '''
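+ # 'where' selects which list the two mock nodes are returned in:
+ # 'nodes' (already in the cluster), 'pending', or 'active_pending'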
+ nodes = list()
+ pending_nodes = list()
+ active_pending_nodes = list()
+ if self.node_id is None:
+ node_list = list()
+ else:
+ node_list = [self.node_id]
+ attrs1 = dict(mip='10.10.10.101', name=NODE_NAME1, node_id=NODE_ID1)
+ attrs2 = dict(mip='10.10.10.101', name=NODE_NAME2, node_id=NODE_ID2)
+ if self.where == 'pending':
+ attrs1['pending_node_id'] = NODE_ID1
+ attrs2['pending_node_id'] = NODE_ID2
+ node1 = self.Bunch(**attrs1)
+ node2 = self.Bunch(**attrs2)
+ if self.where == 'nodes':
+ nodes = [node1, node2]
+ elif self.where == 'pending':
+ pending_nodes = [node1, node2]
+ elif self.where == 'active_pending':
+ active_pending_nodes = [node1, node2]
+ node_list = self.Bunch(nodes=nodes, pending_nodes=pending_nodes, pending_active_nodes=active_pending_nodes)
+ return node_list
+
+ def add_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('adding_node: ', repr(args), repr(kwargs))
+
+ def remove_nodes(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('removing_node: ', repr(args), repr(kwargs))
+
+ def get_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('get_cluster_config: ', repr(args), repr(kwargs))
+ cluster = self.Bunch(cluster=self.cluster_name, state=self.node_state)
+ return self.Bunch(cluster=cluster)
+
+ def set_cluster_config(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('set_cluster_config: ', repr(args), repr(kwargs))
+
+ def list_drives(self, *args, **kwargs): # pylint: disable=unused-argument
+ print('list_drives: ', repr(args), repr(kwargs))
+ drive = self.Bunch(node_id=self.node_id, status="active")
+ return self.Bunch(drives=[drive])
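+ # reporting an 'active' drive for self.node_id lets a test hit the
+ # "node has active drives" failure path on removal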
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'node_ids': [NODE_ID1, NODE_ID2],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node_fail_not_pending(self, mock_create_sf_connection):
+ ''' adding a node - fails as these nodes are unknown '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'nodes not in pending or active lists'
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node(self, mock_create_sf_connection):
+ ''' adding a node '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='pending')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_node_idempotent(self, mock_create_sf_connection):
+ ''' adding a node that is already in the cluster '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node(self, mock_create_sf_connection):
+ ''' removing a node that is in the cluster '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node_idempotent(self, mock_create_sf_connection):
+ ''' removing a node that is not in the cluster '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_remove_node_with_active_drive(self, mock_create_sf_connection):
+ ''' removing a node that is in the cluster but still associated with a drive '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(node_id=NODE_ID1, where='nodes')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ msg = 'Error deleting node %s: node has active drives' % NODE_NAME1
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_only(self, mock_create_sf_connection):
+ ''' set cluster name without adding the node '''
+ args = dict(self.ARGS)
+ args['preset_only'] = True
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ message = 'List of updated nodes with cluster_name:'
+ assert message in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_only_idempotent(self, mock_create_sf_connection):
+ ''' set cluster name without adding the node - name already set '''
+ args = dict(self.ARGS)
+ args['preset_only'] = True
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(cluster_name=args['cluster_name'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_and_add(self, mock_create_sf_connection):
+ ''' set cluster name and add the node '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='pending')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+ message = 'List of updated nodes with cluster_name:'
+ assert message in exc.value.args[0]['msg']
+ message = 'List of added nodes: '
+ assert message in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_and_add_idempotent(self, mock_create_sf_connection):
+ ''' cluster name already set and node already in the cluster - idempotent '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_already_active_no_change(self, mock_create_sf_connection):
+ ''' cluster name already matches on an 'Active' node - no change, no error '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name=args['cluster_name'], node_state='Active')
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+ message = ''
+ assert message == exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_set_cluster_name_already_active_change_not_allowed(self, mock_create_sf_connection):
+ ''' set cluster name fails because node state is 'Active' '''
+ args = dict(self.ARGS)
+ args['cluster_name'] = 'new_cluster_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(where='nodes', cluster_name='old_cluster_name', node_state='Active')
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error updating cluster name for node %s, already in 'Active' state" % NODE_ID1
+ assert message == exc.value.args[0]['msg']
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py
new file mode 100644
index 000000000..83ac3711a
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_qos_policy.py
@@ -0,0 +1,300 @@
+''' unit test for Ansible module: na_elementsw_qos_policy.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_qos_policy \
+ import ElementSWQosPolicy as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+CREATE_ERROR = 'create', 'some_error_in_create_qos_policy'
+MODIFY_ERROR = 'modify', 'some_error_in_modify_qos_policy'
+DELETE_ERROR = 'delete', 'some_error_in_delete_qos_policy'
+
+POLICY_ID = 888
+POLICY_NAME = 'element_qos_policy_name'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, qos_policy_name=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.policy_name = qos_policy_name
+
+ def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build qos_policy list: qos_policy.name, qos_policy.qos_policy_id, qos_policy.qos '''
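+ # always report a single existing policy (POLICY_NAME unless a different
+ # qos_policy_name was passed to the constructor) with 1000/20000/20000 IOPS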
+ if self.policy_name:
+ qos_policy_name = self.policy_name
+ else:
+ qos_policy_name = POLICY_NAME
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos)
+ qos_policies = [qos_policy]
+ qos_policy_list = self.Bunch(qos_policies=qos_policies)
+ return qos_policy_list
+
+ def create_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'create_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR)
+
+ def modify_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR)
+
+ def delete_qos_policy(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'name': 'element_qos_policy_name',
+ 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000},
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_qos_policy(self, mock_create_sf_connection):
+ ''' adding a qos_policy '''
+ args = dict(self.ARGS) # deep copy as other tests can modify args
+ args['name'] += '_1' # new name to force a create
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' adding a qos_policy that already exists - idempotent '''
+ args = dict(self.ARGS)
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy(self, mock_create_sf_connection):
+ ''' removing a qos policy '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' removing a qos policy that does not exist - idempotent '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args['name'] += '_1' # new name to force idempotency
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a qos policy '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_rename_qos_policy(self, mock_create_sf_connection):
+ ''' renaming a qos policy '''
+ args = dict(self.ARGS)
+ args['from_name'] = args['name']
+ args['name'] = 'a_new_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_rename_modify_qos_policy_idempotent(self, mock_create_sf_connection):
+ ''' renaming is a no-op when the target policy name already exists '''
+ args = dict(self.ARGS)
+ args['from_name'] = 'some_older_name'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_qos_policy_exception(self, mock_create_sf_connection):
+ ''' creating a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_qos_policy_exception(self, mock_create_sf_connection):
+ ''' modifying a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_qos_policy_exception(self, mock_create_sf_connection):
+ ''' deleting a qos policy can raise an exception '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error deleting qos policy: %s' % POLICY_NAME
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_missing_qos_option(self, mock_create_sf_connection):
+ ''' report error if qos option is not given on create '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos')
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error creating qos policy: %s, 'qos:' option is required" % args['name']
+ assert exc.value.args[0]['msg'] == message
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_missing_from_name_policy(self, mock_create_sf_connection):
+ ''' report error if qos policy to rename does not exist '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args['from_name'] = 'something_not_likely_to_exist'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Error renaming qos policy, no existing policy with name/id: %s" % args['from_name']
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py
new file mode 100644
index 000000000..7dc6e2d6b
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_template.py
@@ -0,0 +1,138 @@
+''' unit test template, based on Ansible module: na_elementsw_account.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_account \
+ import ElementSWAccount as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+# TODO: replace list_accounts and add_account as needed
+ def list_accounts(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build account list: account.username, account.account_id '''
+ accounts = list()
+ account_list = self.Bunch(accounts=accounts)
+ return account_list
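+ # an empty account list means state=present always takes the create path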
+
+ def add_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_ensure_command_called(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_add_exception(self, mock_create_sf_connection):
+ ''' a more interesting test '''
+ set_module_args({
+ 'state': 'present',
+ 'element_username': 'element_username',
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['add'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ # It may not be a good idea to start with apply
+ # More atomic methods can be easier to mock
+ # apply() is calling list_accounts() and add_account()
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error creating account element_username: %s' % ADD_ERROR
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py
new file mode 100644
index 000000000..e2dc51f79
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_vlan.py
@@ -0,0 +1,343 @@
+''' unit test for Ansible module: na_elementsw_vlan.py '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import pytest
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan \
+ import ElementSWVlan as vlan # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ADD_ERROR = 'some_error_in_add_account'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ class Vlan(object):
+ def __init__(self, entries):
+ self.__dict__.update(entries)
+
+ def __init__(self, force_error=False, where=None):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+
+ def list_virtual_networks(self, virtual_network_tag=None): # pylint: disable=unused-argument
+ ''' list of vlans '''
+ if virtual_network_tag == '1':
+ add1 = self.Bunch(
+ start='2.2.2.2',
+ size=4
+ )
+ add2 = self.Bunch(
+ start='3.3.3.3',
+ size=4
+ )
+ vlan = self.Bunch(
+ attributes={'key': 'value', 'config-mgmt': 'ansible', 'event-source': 'na_elementsw_vlan'},
+ name="test",
+ address_blocks=[
+ add1,
+ add2
+ ],
+ svip='192.168.1.2',
+ gateway='0.0.0.0',
+ netmask='255.255.248.0',
+ namespace=False
+ )
+ vlans = self.Bunch(
+ virtual_networks=[vlan]
+ )
+ else:
+ vlans = self.Bunch(
+ virtual_networks=[]
+ )
+ return vlans
+
+ def add_virtual_network(self, virtual_network_tag=None, **create): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'add' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def remove_virtual_network(self, virtual_network_tag=None): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'remove' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+ def modify_virtual_network(self, virtual_network_tag=None, **modify): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'modify' in self.where:
+ # The module does not check for a specific exception :(
+ raise OSError(ADD_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ vlan()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ def mock_args(self):
+ args = {
+ 'state': 'present',
+ 'name': 'test',
+ 'vlan_tag': 1,
+ 'address_blocks': [
+ {'start': '192.168.1.2', 'size': 5}
+ ],
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ 'netmask': '255.255.248.0',
+ 'gateway': '0.0.0.0',
+ 'namespace': False,
+ 'svip': '192.168.1.2'
+ }
+ return dict(args)
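+ # baseline module arguments; individual tests tweak state, svip, vlan_tag,
+ # or address_blocks as needed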
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp_elementsw_module.NaElementSWModule.set_element_attributes')
+ def test_successful_create(self, mock_set_attributes, mock_create_sf_connection):
+ ''' successful create'''
+ mock_set_attributes.return_value = {'key': 'new_value'}
+ data = self.mock_args()
+ data['vlan_tag'] = '3'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_delete(self, mock_create_sf_connection):
+ ''' successful delete'''
+ data = self.mock_args()
+ data['state'] = 'absent'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['svip'] = '3.4.5.6'
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_same_length(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.10.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.50', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_1(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5},
+ {'start': '10.20.10.50', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_2(self, mock_get, mock_create_sf_connection):
+        ''' no change when the address blocks match, regardless of order '''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ @patch('ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_vlan.ElementSWVlan.get_network_details')
+ def test_successful_modify_address_blocks_different_length_3(self, mock_get, mock_create_sf_connection):
+ ''' successful modify'''
+ mock_get.return_value = {
+ 'address_blocks': [
+ {'start': '10.10.10.20', 'size': 5},
+ {'start': '10.10.10.30', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5}
+ ]
+ }
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '10.10.10.20', 'size': 5},
+ {'start': '10.20.10.40', 'size': 5},
+ {'start': '10.20.10.30', 'size': 5}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_helper_validate_keys(self, mock_create_sf_connection):
+ '''test validate_keys()'''
+ data = self.mock_args()
+ del data['svip']
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.validate_keys()
+ msg = "One or more required fields ['address_blocks', 'svip', 'netmask', 'name'] for creating VLAN is missing"
+ assert exc.value.args[0]['msg'] == msg
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_idempotent(self, mock_create_sf_connection):
+        ''' idempotent case: no change when the address blocks already match '''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_attribute_value(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ data['attributes'] = {'key': 'value2'}
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_successful_modify_attribute_key(self, mock_create_sf_connection):
+ ''' successful modify'''
+ data = self.mock_args()
+ data['address_blocks'] = [{'start': '2.2.2.2', 'size': 4},
+ {'start': '3.3.3.3', 'size': 4}]
+ data['attributes'] = {'key2': 'value2'}
+ set_module_args(data)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = vlan()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py
new file mode 100644
index 000000000..926dda90b
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules/test_na_elementsw_volume.py
@@ -0,0 +1,364 @@
+''' unit test for Ansible module: na_elementsw_volume.py '''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.tests.unit.compat.mock import patch
+import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
+
+if not netapp_utils.has_sf_sdk():
+ pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK')
+
+from ansible_collections.netapp.elementsw.plugins.modules.na_elementsw_volume \
+ import ElementSWVolume as my_module # module under test
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+CREATE_ERROR = 'create', 'some_error_in_create_volume'
+MODIFY_ERROR = 'modify', 'some_error_in_modify_volume'
+DELETE_ERROR = 'delete', 'some_error_in_delete_volume'
+
+POLICY_ID = 888
+POLICY_NAME = 'element_qos_policy_name'
+VOLUME_ID = 777
+VOLUME_NAME = 'element_volume_name'
+
+
+class MockSFConnection(object):
+ ''' mock connection to ElementSW host '''
+
+ class Bunch(object): # pylint: disable=too-few-public-methods
+ ''' create object with arbitrary attributes '''
+ def __init__(self, **kw):
+ ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 '''
+ setattr(self, '__dict__', kw)
+
+ def __init__(self, force_error=False, where=None, with_qos_policy_id=True):
+ ''' save arguments '''
+ self.force_error = force_error
+ self.where = where
+ self.with_qos_policy_id = with_qos_policy_id
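+        # when force_error is set, mock methods whose key appears in 'where' simulate a failure
+        # (the create/modify/delete mocks below raise ApiServerError)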
+
+ def list_qos_policies(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build qos_policy list '''
+ qos_policy_name = POLICY_NAME
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ qos_policy = self.Bunch(name=qos_policy_name, qos_policy_id=POLICY_ID, qos=qos)
+ qos_policy_1 = self.Bunch(name=qos_policy_name + '_1', qos_policy_id=POLICY_ID + 1, qos=qos)
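+        # the second policy (name + '_1') is used by tests that switch the volume to another existing policy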
+ qos_policies = [qos_policy, qos_policy_1]
+ qos_policy_list = self.Bunch(qos_policies=qos_policies)
+ return qos_policy_list
+
+ def list_volumes_for_account(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume list: volume.name, volume.id '''
+ volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='')
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def list_volumes(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' build volume details: volume.name, volume.id '''
+ if self.with_qos_policy_id:
+ qos_policy_id = POLICY_ID
+ else:
+ qos_policy_id = None
+ qos = self.Bunch(min_iops=1000, max_iops=20000, burst_iops=20000)
+ volume = self.Bunch(name=VOLUME_NAME, volume_id=VOLUME_ID, delete_time='', access='rw',
+ account_id=1, qos=qos, qos_policy_id=qos_policy_id, total_size=1000000000,
+ attributes={'config-mgmt': 'ansible', 'event-source': 'na_elementsw_volume'}
+ )
+ volumes = [volume]
+ volume_list = self.Bunch(volumes=volumes)
+ return volume_list
+
+ def get_account_by_name(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' returns account_id '''
+ if self.force_error and 'get_account_id' in self.where:
+ account_id = None
+ else:
+ account_id = 1
+ account = self.Bunch(account_id=account_id)
+ result = self.Bunch(account=account)
+ return result
+
+ def create_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'create_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*CREATE_ERROR)
+
+ def modify_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ print("modify: %s, %s " % (repr(args), repr(kwargs)))
+ if self.force_error and 'modify_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*MODIFY_ERROR)
+
+ def delete_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+ def purge_deleted_volume(self, *args, **kwargs): # pylint: disable=unused-argument
+ ''' We don't check the return code, but could force an exception '''
+ if self.force_error and 'delete_exception' in self.where:
+ raise netapp_utils.solidfire.common.ApiServerError(*DELETE_ERROR)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ ARGS = {
+ 'state': 'present',
+ 'name': VOLUME_NAME,
+ 'account_id': 'element_account_id',
+ 'qos': {'minIOPS': 1000, 'maxIOPS': 20000, 'burstIOPS': 20000},
+ 'qos_policy_name': POLICY_NAME,
+ 'size': 1,
+ 'enable512e': True,
+ 'hostname': 'hostname',
+ 'username': 'username',
+ 'password': 'password',
+ }
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_volume(self, mock_create_sf_connection):
+ ''' adding a volume '''
+        args = dict(self.ARGS)  # copy, so changes here do not leak into other tests
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_or_modify_volume_idempotent_qos_policy(self, mock_create_sf_connection):
+        ''' no change when the volume already exists with the requested qos policy '''
+ args = dict(self.ARGS)
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_add_or_modify_volume_idempotent_qos(self, mock_create_sf_connection):
+        ''' no change when the volume already exists with the requested qos settings '''
+ args = dict(self.ARGS)
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume(self, mock_create_sf_connection):
+ ''' removing a volume '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume_idempotent(self, mock_create_sf_connection):
+        ''' no change when the volume to delete does not exist '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args['name'] += '_1' # new name to force idempotency
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_policy_to_qos(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args['qos_policy_name'] += '_1'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_qos_to_qos_policy(self, mock_create_sf_connection):
+ ''' modifying a volume '''
+ args = dict(self.ARGS)
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(with_qos_policy_id=False)
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_create_volume_exception(self, mock_create_sf_connection):
+ ''' creating a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['create_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error provisioning volume: %s' % args['name']
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_modify_volume_exception(self, mock_create_sf_connection):
+ ''' modifying a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['qos'] = {'minIOPS': 2000}
+ args.pop('qos_policy_name') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['modify_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error updating volume: %s' % VOLUME_ID
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_delete_volume_exception(self, mock_create_sf_connection):
+ ''' deleting a volume can raise an exception '''
+ args = dict(self.ARGS)
+ args['state'] = 'absent'
+ args.pop('qos') # parameters are mutually exclusive: qos|qos_policy_name
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection(force_error=True, where=['delete_exception'])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = 'Error deleting volume: %s' % VOLUME_ID
+ assert exc.value.args[0]['msg'].startswith(message)
+
+ @patch('ansible_collections.netapp.elementsw.plugins.module_utils.netapp.create_sf_connection')
+ def test_check_error_reporting_on_non_existent_qos_policy(self, mock_create_sf_connection):
+        ''' report an error when the requested qos policy does not exist '''
+ args = dict(self.ARGS)
+ args['name'] += '_1' # new name to force a create
+ args.pop('qos')
+ args['qos_policy_name'] += '_2'
+ set_module_args(args)
+ # my_obj.sfe will be assigned a MockSFConnection object:
+ mock_create_sf_connection.return_value = MockSFConnection()
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print(exc.value.args[0])
+ message = "Cannot find qos policy with name/id: %s" % args['qos_policy_name']
+ assert exc.value.args[0]['msg'] == message
diff --git a/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py
new file mode 100644
index 000000000..171a7bae5
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/plugins/modules_utils/test_netapp_module.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2018 NetApp
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for module_utils netapp_module.py '''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.netapp.elementsw.tests.unit.compat import unittest
+from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule as na_helper
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def test_get_cd_action_create(self):
+ ''' validate cd_action for create '''
+ current = None
+ desired = {'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'create'
+
+ def test_get_cd_action_delete(self):
+ ''' validate cd_action for delete '''
+ current = {'state': 'absent'}
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result == 'delete'
+
+ def test_get_cd_action(self):
+        ''' validate cd_action returns None when there is nothing to do '''
+ current = None
+ desired = {'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_cd_action(current, desired)
+ assert result is None
+
+ def test_get_modified_attributes_for_no_data(self):
+ ''' validate modified attributes when current is None '''
+ current = None
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_get_modified_attributes(self):
+ ''' validate modified attributes '''
+ current = {'name': ['test', 'abcd', 'xyz', 'pqr'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc', 'xyz', 'pqr'], 'state': 'absent'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == desired
+
+ def test_get_modified_attributes_for_intersecting_mixed_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': [2, 'four', 'six', 8]}
+ desired = {'name': ['a', 8, 'ab', 'four', 'abcd']}
+ my_obj = na_helper()
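+        # the third argument requests a list diff: only desired entries missing from current are returned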
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abcd']}
+
+ def test_get_modified_attributes_for_intersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'six', 'ab', 'four', 'abc']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abc']}
+
+ def test_get_modified_attributes_for_nonintersecting_list(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['two', 'four', 'six', 'eight']}
+ desired = {'name': ['a', 'ab', 'abd']}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['a', 'ab', 'abd']}
+
+ def test_get_modified_attributes_for_list_of_dicts_no_data(self):
+ ''' validate modified attributes for list diff '''
+ current = None
+ desired = {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {}
+
+ def test_get_modified_attributes_for_intersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_nonintersecting_list_of_dicts(self):
+ ''' validate modified attributes for list diff '''
+ current = {'address_blocks': [{'start': '10.10.10.23', 'size': 5}, {'start': '10.10.10.30', 'size': 5}]}
+ desired = {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'address_blocks': [{'start': '10.20.10.23', 'size': 5}, {'start': '10.20.10.30', 'size': 5}, {'start': '10.20.10.40', 'size': 5}]}
+
+ def test_get_modified_attributes_for_list_diff(self):
+ ''' validate modified attributes for list diff '''
+ current = {'name': ['test', 'abcd'], 'state': 'present'}
+ desired = {'name': ['abcd', 'abc'], 'state': 'present'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired, True)
+ assert result == {'name': ['abc']}
+
+ def test_get_modified_attributes_for_no_change(self):
+ ''' validate modified attributes for same data in current and desired '''
+ current = {'name': 'test'}
+ desired = {'name': 'test'}
+ my_obj = na_helper()
+ result = my_obj.get_modified_attributes(current, desired)
+ assert result == {}
+
+ def test_is_rename_action_for_empty_input(self):
+ ''' validate rename action for input None '''
+ source = None
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result == source
+
+ def test_is_rename_action_for_no_source(self):
+ ''' validate rename action when source is None '''
+ source = None
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
+
+ def test_is_rename_action_for_no_target(self):
+ ''' validate rename action when target is None '''
+ source = 'test2'
+ target = None
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is True
+
+ def test_is_rename_action(self):
+ ''' validate rename action '''
+ source = 'test'
+ target = 'test2'
+ my_obj = na_helper()
+ result = my_obj.is_rename_action(source, target)
+ assert result is False
diff --git a/ansible_collections/netapp/elementsw/tests/unit/requirements.txt b/ansible_collections/netapp/elementsw/tests/unit/requirements.txt
new file mode 100644
index 000000000..dde1958f1
--- /dev/null
+++ b/ansible_collections/netapp/elementsw/tests/unit/requirements.txt
@@ -0,0 +1 @@
+solidfire-sdk-python ; python_version >= '2.7'