summaryrefslogtreecommitdiffstats
path: root/ansible_collections/netapp/cloudmanager
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/netapp/cloudmanager')
-rw-r--r--ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml210
-rw-r--r--ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml100
-rw-r--r--ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml45
-rw-r--r--ansible_collections/netapp/cloudmanager/.github/workflows/main.yml47
-rw-r--r--ansible_collections/netapp/cloudmanager/CHANGELOG.rst325
-rw-r--r--ansible_collections/netapp/cloudmanager/COPYING674
-rw-r--r--ansible_collections/netapp/cloudmanager/FILES.json1006
-rw-r--r--ansible_collections/netapp/cloudmanager/MANIFEST.json37
-rw-r--r--ansible_collections/netapp/cloudmanager/README.md262
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml81
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml374
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/config.yaml32
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml5
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml13
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml8
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml4
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml3
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml5
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml2
-rw-r--r--ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml3
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/README.md34
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml10
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml13
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml10
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml18
-rw-r--r--ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt1
-rw-r--r--ansible_collections/netapp/cloudmanager/kubectl.sha2561
-rw-r--r--ansible_collections/netapp/cloudmanager/meta/execution-environment.yml3
-rw-r--r--ansible_collections/netapp/cloudmanager/meta/runtime.yml17
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/README.md31
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py48
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py332
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py1381
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py332
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py458
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py265
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py655
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py591
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py644
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py855
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py746
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py858
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py235
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py192
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py471
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py660
-rw-r--r--ansible_collections/netapp/cloudmanager/requirements.txt10
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py0
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py33
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py122
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/compat/unittest.py44
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py506
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py578
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py77
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py297
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py165
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py252
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py730
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py178
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py407
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py426
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py439
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py543
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py591
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py144
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py176
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py216
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt1
-rw-r--r--ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt10
125 files changed, 18166 insertions, 0 deletions
diff --git a/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..2e7814624
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,210 @@
+---
+name: 🐛 Bug report
+description: Create a report to help us improve
+
+body:
+- type: markdown
+ attributes:
+ value: >
+ **Thank you for wanting to report a bug in netapp.cloudmanager!**
+
+
+ ⚠
+ Verify first that your issue is not [already reported on
+ GitHub][issue search] and keep in mind that we may have to keep
+ the current behavior because [every change breaks someone's
+ workflow][XKCD 1172].
+ We try to be mindful about this.
+
+ Also test if the latest release and devel branch are affected too.
+
+
+ **Tip:** If you are seeking community support, please consider
+ [joining our Slack community][ML||IRC].
+
+
+
+ [ML||IRC]:
+ https://join.slack.com/t/netapppub/shared_invite/zt-njcjx2sh-1VR2mEDvPcJAmPutOnP~mg
+
+ [issue search]: ../search?q=is%3Aissue&type=issues
+
+ [XKCD 1172]: https://xkcd.com/1172/
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with netapp.cloudmanager from the devel branch on GitHub, Y
+ breaks in a way Z under the env E. Here are all the details I know
+ about this problem...
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >
+ Write the short name of the rst file, module, plugin, task or
+ feature below, *use your best guess if unsure*.
+
+
+ **Tip:** Cannot find it in this repository? Please be advised that
+ the source for some parts of the documentation are hosted outside
+ of this repository. If the page you are reporting describes
+ modules/plugins/etc that are not officially supported by the
+ Ansible Core Engineering team, there is a good chance that it is
+ coming from one of the [Ansible Collections maintained by the
+ community][collections org]. If this is the case, please make sure
+ to file an issue under the appropriate project there instead.
+
+
+ [collections org]: /ansible-collections
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` below, under
+ the prompt line. Please don't wrap it with triple backticks — your
+ whole input will be turned into a code snippet automatically.
+ render: console
+ value: |
+ $ ansible --version
+ placeholder: |
+ $ ansible --version
+ ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200)
+ config file = None
+ configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
+ ansible python module location = ~/src/github/ansible/ansible/lib/ansible
+ ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections
+ executable location = bin/ansible
+ python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0]
+ jinja version = 2.11.3
+ libyaml = True
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Cloud Manager Collection Version
+ description: >-
+ Cloud Manager Collection Version. Run `ansible-galaxy collection list` and copy the entire output
+ render: console
+ value: |
+ $ ansible-galaxy collection list
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Playbook
+ description: >-
+ The task from the playbook that is giving you the issue
+ render: console
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+ Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ placeholder: |
+ 1. Implement the following playbook:
+
+ ```yaml
+ ---
+ # ping.yml
+ - hosts: all
+ gather_facts: false
+ tasks:
+ - ping:
+ ...
+ ```
+ 2. Then run `ANSIBLE_DEBUG=1 ansible-playbook ping.yml -vvvvv`
+ 3. An error occurs.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+ I expected X to happen because I assumed Y and was shocked
+ that it did not.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output and don't wrap it with triple backticks — your
+ whole input will be turned into a code snippet automatically.
+ render: console
+ placeholder: >-
+ Certificate did not match expected hostname: files.pythonhosted.org. Certificate: {'notAfter': 'Apr 28 19:20:25 2021 GMT', 'subjectAltName': ((u'DNS', 'r.ssl.fastly.net'), (u'DNS', '*.catchpoint.com'), (u'DNS', '*.cnn.io'), (u'DNS', '*.dollarshaveclub.com'), (u'DNS', '*.eater.com'), (u'DNS', '*.fastly.picmonkey.com'), (u'DNS', '*.files.saymedia-content.com'), (u'DNS', '*.ft.com'), (u'DNS', '*.meetupstatic.com'), (u'DNS', '*.nfl.com'), (u'DNS', '*.pagar.me'), (u'DNS', '*.picmonkey.com'), (u'DNS', '*.realself.com'), (u'DNS', '*.sbnation.com'), (u'DNS', '*.shakr.com'), (u'DNS', '*.streamable.com'), (u'DNS', '*.surfly.com'), (u'DNS', '*.theverge.com'), (u'DNS', '*.thrillist.com'), (u'DNS', '*.vox-cdn.com'), (u'DNS', '*.vox.com'), (u'DNS', '*.voxmedia.com'), (u'DNS', 'eater.com'), (u'DNS', 'ft.com'), (u'DNS', 'i.gse.io'), (u'DNS', 'picmonkey.com'), (u'DNS', 'realself.com'), (u'DNS', 'static.wixstatic.com'), (u'DNS', 'streamable.com'), (u'DNS', 'surfly.com'), (u'DNS', 'theverge.com'), (u'DNS', 'vox-cdn.com'), (u'DNS', 'vox.com'), (u'DNS', 'www.joyent.com')), 'subject': ((('countryName', u'US'),), (('stateOrProvinceName', u'California'),), (('localityName', u'San Francisco'),), (('organizationName', u'Fastly, Inc'),), (('commonName', u'r.ssl.fastly.net'),))}
+ Exception:
+ Traceback (most recent call last):
+ File "/usr/local/lib/python2.6/dist-packages/pip/basecommand.py", line 215, in main
+ status = self.run(options, args)
+ File "/usr/local/lib/python2.6/dist-packages/pip/commands/install.py", line 335, in run
+ wb.build(autobuilding=True)
+ File "/usr/local/lib/python2.6/dist-packages/pip/wheel.py", line 749, in build
+ self.requirement_set.prepare_files(self.finder)
+ File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 380, in prepare_files
+ ignore_dependencies=self.ignore_dependencies))
+ File "/usr/local/lib/python2.6/dist-packages/pip/req/req_set.py", line 620, in _prepare_file
+ session=self.session, hashes=hashes)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 821, in unpack_url
+ hashes=hashes
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 659, in unpack_http_url
+ hashes)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 853, in _download_http_url
+ stream=True,
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 521, in get
+ return self.request('GET', url, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/download.py", line 386, in request
+ return super(PipSession, self).request(method, url, *args, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 508, in request
+ resp = self.send(prep, **send_kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/sessions.py", line 618, in send
+ r = adapter.send(request, **kwargs)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/cachecontrol/adapter.py", line 47, in send
+ resp = super(CacheControlAdapter, self).send(request, **kw)
+ File "/usr/local/lib/python2.6/dist-packages/pip/_vendor/requests/adapters.py", line 506, in send
+ raise SSLError(e, request=request)
+ SSLError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Max retries exceeded with url: /packages/ef/ab/aa12712415809bf698e719b307419f953e25344e8f42d557533d7a02b276/netapp_lib-2020.7.16-py2-none-any.whl (Caused by SSLError(CertificateError("hostname 'files.pythonhosted.org' doesn't match either of 'r.ssl.fastly.net', '*.catchpoint.com', '*.cnn.io', '*.dollarshaveclub.com', '*.eater.com', '*.fastly.picmonkey.com', '*.files.saymedia-content.com', '*.ft.com', '*.meetupstatic.com', '*.nfl.com', '*.pagar.me', '*.picmonkey.com', '*.realself.com', '*.sbnation.com', '*.shakr.com', '*.streamable.com', '*.surfly.com', '*.theverge.com', '*.thrillist.com', '*.vox-cdn.com', '*.vox.com', '*.voxmedia.com', 'eater.com', 'ft.com', 'i.gse.io', 'picmonkey.com', 'realself.com', 'static.wixstatic.com', 'streamable.com', 'surfly.com', 'theverge.com', 'vox-cdn.com', 'vox.com', 'www.joyent.com'",),))
+ ERROR: Command "/usr/bin/python2.6 /root/ansible/test/lib/ansible_test/_data/quiet_pip.py install --disable-pip-version-check -r /root/ansible/test/lib/ansible_test/_data/requirements/units.txt -r tests/unit/requirements.txt -c /root/ansible/test/lib/ansible_test/_data/requirements/constraints.txt" returned exit status 2.
+ ERROR: Command "docker exec d47eb360db4ce779c1f690db964655b76e68895c4360ff252c46fe7fe6f5c75a /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/root/ansible_collections/netapp/ontap LC_ALL=en_US.UTF-8 /usr/bin/python3.6 /root/ansible/bin/ansible-test units --metadata tests/output/.tmp/metadata-9i2qfrcl.json --truncate 200 --redact --color yes --requirements --python default --requirements-mode only" returned exit status 1.
+ validations:
+ required: true
+
+
+- type: markdown
+ attributes:
+ value: >
+ *One last thing...*
+
+
+ Thank you for your collaboration!
+
+
+...
diff --git a/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..8a76456de
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,100 @@
+---
+name: ✨ Feature request
+description: Suggest an idea for this project
+
+body:
+- type: markdown
+ attributes:
+ value: >
+ **Thank you for wanting to suggest a feature for netapp.cloudmanager!**
+
+ 💡
+ Before you go ahead with your request, please first consider if it
+ would be useful for the majority of the netapp.cloudmanager users. As a
+ general rule of thumb, any feature that is only of interest to a
+ small sub group should be [implemented in a third-party Ansible
+ Collection][contribute to collections] or maybe even just your
+ project alone. Be mindful of the fact that the essential
+ netapp.cloudmanager features have a broad impact.
+
+
+ <details>
+ <summary>
+ ❗ Every change breaks someone's workflow.
+ </summary>
+
+
+ [![❗ Every change breaks someone's workflow.
+ ](https://imgs.xkcd.com/comics/workflow.png)
+ ](https://xkcd.com/1172/)
+ </details>
+
+
+ ⚠
+ Verify first that your idea is not [already requested on
+ GitHub][issue search].
+
+ Also test if the main branch does not already implement this.
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: >
+ Describe the new feature/improvement you would like briefly below.
+
+
+ What's the problem this feature will solve?
+
+ What are you trying to do, that you are unable to achieve
+ with netapp.cloudmanager as it currently stands?
+
+
+ * Provide examples of real-world use cases that this would enable
+ and how it solves the problem you described.
+
+ * How do you solve this now?
+
+ * Have you tried to work around the problem using other tools?
+
+ * Could there be a different approach to solving this issue?
+
+ placeholder: >-
+ I am trying to do X with netapp.cloudmanager from the devel branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of netapp.cloudmanager because of Z.
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >
+ Write the short name of the module, plugin, task or feature below,
+ *use your best guess if unsure*.
+
+
+ [collections org]: /ansible-collections
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ placeholder: >-
+ I asked on https://stackoverflow.com/.... and the community
+ advised me to do X, Y and Z.
+ validations:
+ required: true
+
+...
diff --git a/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml b/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml
new file mode 100644
index 000000000..eebbe7fb8
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/.github/workflows/coverage.yml
@@ -0,0 +1,45 @@
+name: NetApp.cloudmanager Ansible Coverage
+
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 6 * * *'
+
+jobs:
+ sanity:
+ name: Coverage on Cloudmanager
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install ansible stable-2.12
+ run: pip install https://github.com/ansible/ansible/archive/stable-2.12.tar.gz --disable-pip-version-check
+
+ - name: Make directory to make ansible-test happy
+ run: |
+ pwd
+ mkdir -p ansible_collections/netapp/cloudmanager/
+ rsync -av . ansible_collections/netapp/cloudmanager/ --exclude ansible_collections/netapp/cloudmanager/
+
+ - name: Run Unit Tests
+ run: ansible-test units --coverage --color --docker --python 3.8
+ working-directory: ansible_collections/netapp/cloudmanager/
+
+ # ansible-test supports producing code coverage data
+ - name: Generate coverage report
+ run: ansible-test coverage xml -v --requirements --group-by command --group-by version
+ working-directory: ansible_collections/netapp/cloudmanager/
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ working-directory: ansible_collections/netapp/cloudmanager/
+ verbose: true
diff --git a/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml b/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml
new file mode 100644
index 000000000..2548b1099
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/.github/workflows/main.yml
@@ -0,0 +1,47 @@
+name: NetApp.cloudmanager Ansible CI
+
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 6 * * *'
+
+jobs:
+ sanity:
+ name: Sanity (${{ matrix.ansible }}) on Cloudmanager
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ fail-fast: false
+ ansible:
+ - stable-2.9
+ - stable-2.10
+ - stable-2.11
+ - stable-2.12
+ - stable-2.13
+ - stable-2.14
+ - devel
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ # Ansible 2.14 requires 3.9 as a minimum
+ python-version: 3.9
+
+ - name: Install ansible (${{ matrix.ansible }})
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
+
+ - name: Make directory to make ansible-test happy
+ run: |
+ pwd
+ mkdir -p ansible_collections/netapp/cloudmanager/
+ rsync -av . ansible_collections/netapp/cloudmanager/ --exclude ansible_collections/netapp/cloudmanager/
+
+
+ - name: Run sanity tests Cloudmanager
+ run: ansible-test sanity --docker -v --color
+ working-directory: ansible_collections/netapp/cloudmanager/
diff --git a/ansible_collections/netapp/cloudmanager/CHANGELOG.rst b/ansible_collections/netapp/cloudmanager/CHANGELOG.rst
new file mode 100644
index 000000000..262da39fa
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/CHANGELOG.rst
@@ -0,0 +1,325 @@
+============================================
+NetApp CloudManager Collection Release Notes
+============================================
+
+.. contents:: Topics
+
+
+v21.22.0
+========
+
+Minor Changes
+-------------
+
+- Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update.
+
+v21.21.0
+========
+
+Minor Changes
+-------------
+
+- na_cloudmanager_connector_azure - expose connector managed system identity principal_id to perform role assignment
+- na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS
+- na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and ``availability_zone_node2`` for CVO Azure HA location
+
+v21.20.1
+========
+
+Bugfixes
+--------
+
+- new meta/execution-environment.yml is failing ansible-builder sanitize step.
+
+v21.20.0
+========
+
+Minor Changes
+-------------
+
+- Add ``availability_zone`` option in CVO Azure on the location configuration.
+- Add ``subnet_path`` option in CVO GCP.
+- na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support SSH authentication method key pair.
+- na_cloudmanager_volume - Support AWS FsxN working environment.
+
+Bugfixes
+--------
+
+- na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP connector.
+
+v21.19.0
+========
+
+Minor Changes
+-------------
+
+- Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs.
+
+v21.18.0
+========
+
+Minor Changes
+-------------
+
+- na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``.
+
+v21.17.0
+========
+
+Minor Changes
+-------------
+
+- na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``.
+- na_cloudmanager_connector_azure - Support user defined ``storage_account`` name. The ``storage_account`` can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``.
+- na_cloudmanager_cvo_aws - Support license_type update
+- na_cloudmanager_cvo_azure - Support license_type update
+- na_cloudmanager_cvo_gcp - Support license_type update
+
+v21.16.0
+========
+
+Minor Changes
+-------------
+
+- na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed.
+
+Bugfixes
+--------
+
+- Add check when volume is capacity tiered.
+- na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector.
+
+v21.15.0
+========
+
+Minor Changes
+-------------
+
+- Add the description of client_id based on the cloudmanager UI.
+- Set ``license_type`` default value 'capacity-paygo' for single node, 'ha-capacity-paygo' for HA, and ``capacity_package_name`` value 'Essential'.
+
+v21.14.0
+========
+
+Minor Changes
+-------------
+
+- na_cloudmanager_snapmirror - Add FSX to snapmirror.
+
+Bugfixes
+--------
+
+- CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on ``instance_type`` change.
+
+v21.13.0
+========
+
+Minor Changes
+-------------
+
+- Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true.
+- Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version`` is true and ``ontap_version`` is provided with a specific version. ``use_latest_version`` has to be false.
+- na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete.
+- na_cloudmanager_connector_aws - make the module idempotent for create and delete.
+- na_cloudmanager_connector_aws - report client_id and instance_id if connector already exists.
+- na_cloudmanager_cvo_aws - Support instance_type update
+- na_cloudmanager_cvo_azure - Support instance_type update
+- na_cloudmanager_cvo_gcp - Support instance_type update
+- na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info
+- na_cloudmanager_volume - Report error if the volume properties cannot be modified. Add support ``tiering_policy`` and ``snapshot_policy_name`` modification.
+
+Bugfixes
+--------
+
+- na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent update ``gcp_labels`` failure.
+
+New Modules
+-----------
+
+- netapp.cloudmanager.na_cloudmanager_aws_fsx - Cloud ONTAP file system(FSX) in AWS
+
+v21.12.1
+========
+
+Bugfixes
+--------
+
+- na_cloudmanager_connector_aws - Fix default ami not based on the region in resource file
+- na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info".
+
+v21.12.0
+========
+
+Minor Changes
+-------------
+
+- PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
+- na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance
+- na_cloudmanager_cvo_gcp - Add extra label handling for HA and only allow adding new labels on gcp_labels
+- na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed
+
+Bugfixes
+--------
+
+- Fix cannot find working environment if ``working_environment_name`` is provided
+
+v21.11.0
+========
+
+Minor Changes
+-------------
+
+- Add CVO modification unit tests
+- Adding new parameter ``capacity_package_name`` for all CVOs creation with capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA.
+- all modules - better error reporting if refresh_token is not valid.
+- na_cloudmanager_connector_gcp - automatically fetch client_id for delete.
+- na_cloudmanager_connector_gcp - make the module idempotent for create and delete.
+- na_cloudmanager_connector_gcp - report client_id if connector already exists.
+- na_cloudmanager_cvo_aws - Add unit tests for capacity based license support.
+- na_cloudmanager_cvo_azure - Add unit tests for capacity based license support.
+- na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support and delete cvo.
+- netapp.py - improve error handling with error content.
+
+Bugfixes
+--------
+
+- na_cloudmanager_connector_gcp - typeError when using proxy certificates.
+
+v21.10.0
+========
+
+Minor Changes
+-------------
+
+- Only these parameters will be modified on the existing CVOs. svm_password will be updated on each run.
+- na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and aws_tag.
+- na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn`` as AWS encryption parameters to support AWS CVO encryption
+- na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false.
+- na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and azure_tag.
+- na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters`` to support AZURE CVO encryption
+- na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and gcp_labels.
+- na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters`` to support GCP CVO encryption
+
+Bugfixes
+--------
+
+- na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation
+
+v21.9.0
+=======
+
+Minor Changes
+-------------
+
+- na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP, ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume.
+- na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size`` to Standard_DS3_v2.
+- na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity, vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity, subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication.
+
+v21.8.0
+=======
+
+Major Changes
+-------------
+
+- Adding stage environment to all modules in cloudmanager
+
+Minor Changes
+-------------
+
+- na_cloudmanager - Support service account with new options ``sa_client_id`` and ``sa_secret_key`` to use for API operations.
+
+Bugfixes
+--------
+
+- na_cloudmanager_aggregate - accept client_id end with or without 'clients'
+- na_cloudmanager_cifs_server - accept client_id end with or without 'clients'
+- na_cloudmanager_connector_aws - accept client_id end with or without 'clients'
+- na_cloudmanager_connector_azure - accept client_id end with or without 'clients'
+- na_cloudmanager_connector_gcp - accept client_id end with or without 'clients'
+- na_cloudmanager_cvo_aws - accept client_id end with or without 'clients'
+- na_cloudmanager_cvo_azure - accept client_id end with or without 'clients'
+- na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients'
+- na_cloudmanager_info - accept client_id end with or without 'clients'
+- na_cloudmanager_nss_account - accept client_id end with or without 'clients'
+- na_cloudmanager_snapmirror - accept client_id end with or without 'clients'
+- na_cloudmanager_volume - accept client_id end with or without 'clients'
+
+v21.7.0
+=======
+
+Minor Changes
+-------------
+
+- na_cloudmanager_aggregate - Add provider_volume_type gp3 support.
+- na_cloudmanager_connector_gcp - rename option ``service_account_email`` and ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path`` respectively.
+- na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support.
+- na_cloudmanager_snapmirror - Add provider_volume_type gp3 support.
+- na_cloudmanager_volume - Add aggregate_name support on volume creation.
+- na_cloudmanager_volume - Add provider_volume_type gp3 support.
+
+Bugfixes
+--------
+
+- na_cloudmanager_aggregate - Improve error message
+- na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id, vnet_name as aliases of vnet_id.
+- na_cloudmanager_connector_azure - Fix KeyError client_id
+- na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication
+- na_cloudmanager_nss_account - Improve error message
+- na_cloudmanager_volume - Improve error message
+
+v21.6.0
+=======
+
+Bugfixes
+--------
+
+- na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is true
+- na_cloudmanager_connector_azure - Change client_id as optional
+- na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None.
+- na_cloudmanager_connector_azure - Fix wrong example on the document and update account_id is required field on deletion.
+- na_cloudmanager_cvo_gcp - Change vpc_id from optional to required.
+
+New Modules
+-----------
+
+- netapp.cloudmanager.na_cloudmanager_snapmirror - NetApp Cloud Manager SnapMirror
+
+v21.5.0
+=======
+
+Minor Changes
+-------------
+
+- na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud manager, instance ID and account ID. New option ``proxy_certificates``.
+- na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id.
+- na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id.
+- na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id.
+
+Bugfixes
+--------
+
+- na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber in the resulting json structure
+
+v21.4.0
+=======
+
+New Modules
+-----------
+
+- netapp.cloudmanager.na_cloudmanager_connector_azure - NetApp Cloud Manager connector for Azure.
+- netapp.cloudmanager.na_cloudmanager_connector_gcp - NetApp Cloud Manager connector for GCP.
+- netapp.cloudmanager.na_cloudmanager_cvo_azure - NetApp Cloud Manager CVO/working environment in single or HA mode for Azure.
+- netapp.cloudmanager.na_cloudmanager_info - NetApp Cloud Manager info
+
+v21.3.0
+=======
+
+New Modules
+-----------
+
+- netapp.cloudmanager.na_cloudmanager_aggregate - NetApp Cloud Manager Aggregate
+- netapp.cloudmanager.na_cloudmanager_cifs_server - NetApp Cloud Manager cifs server
+- netapp.cloudmanager.na_cloudmanager_connector_aws - NetApp Cloud Manager connector for AWS
+- netapp.cloudmanager.na_cloudmanager_cvo_aws - NetApp Cloud Manager CVO for AWS
+- netapp.cloudmanager.na_cloudmanager_nss_account - NetApp Cloud Manager nss account
+- netapp.cloudmanager.na_cloudmanager_volume - NetApp Cloud Manager volume
diff --git a/ansible_collections/netapp/cloudmanager/COPYING b/ansible_collections/netapp/cloudmanager/COPYING
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/netapp/cloudmanager/FILES.json b/ansible_collections/netapp/cloudmanager/FILES.json
new file mode 100644
index 000000000..bc550db32
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/FILES.json
@@ -0,0 +1,1006 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "execution_environments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "execution_environments/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ab031084649c1857b4f90b7ed68ee3f530d51892ca81846bfbdd4657550cccc",
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_galaxy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_galaxy/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879",
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_galaxy/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9079c32340fc566d2c9352283abfd96fedd35d06e736f637e57cbfa1732b5513",
+ "format": 1
+ },
+ {
+ "name": "execution_environments/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "881dc2d94eb8af2fdea0ff74effa171a81cf0200013720242f87a920f044d2c6",
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_github/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4a1200215a7804d7f5407d160f23bfb6f5da7d3b15fd06df5bc7b0820e35879",
+ "format": 1
+ },
+ {
+ "name": "execution_environments/from_github/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92a16aeff7681eb4188ce1140409d685ff71e00ffe940065fa1c2e888c470e88",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aaff48dee8012c4d4002fe11b7addd01d4439dbc4a06620a65e7ad75d9cead37",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c93c345f0b4049d69b114933fb59b79f9d63ce8e65717c6f1c1a8801721977d",
+ "format": 1
+ },
+ {
+ "name": "plugins/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82ee692702ec1dd604cdbc38ff252114e5204e1b0627045a66c9451e7a918ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfd1e767b765a237c77dcd6961e2b413f1a6c268f041e371c39e986273c6a235",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5329d63aec46a1d4f9bbcd50b2117269f50eae6db319ba0d2ccd26cdcc90020",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_cvo_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "604ae4f223375285adc537eecf75529c19bfc56229c457ba6532bb646e14f0a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_cvo_azure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7199e129aa6938baa4e01b1bffc3ae39a0c91e395e5374aebd21f9168771e3ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_aggregate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77d48b15245ed5503206eab9491463a873e39e7bb3dd2e875c1124eb38d11181",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_cifs_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09e71987c20d1bdbc163f0a447e4a267a8ee14c3402f4e88fcb8d17d8074ba06",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_nss_account.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5118fa9fb7da5d866393b299a13024f605c2cd00888b9cae58415a3795ff86f3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_connector_aws.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01cec957530493b5de9a91d26b3ea498b60292e9b7850a4b93af43f9e442b1e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_aws_fsx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5814b7a0b08b996862489ed00f1168fc1e166cea5a68db98ea99170830def196",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_connector_azure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fbdf14385275e7bff2fc06e7f80f332cf6e6492435878e5101b2cbeb20e02d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_snapmirror.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4180ef0fe86de31699e1dc515b0c92b01a2991e17543a2676e0cb8f2ef39c733",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_cvo_aws.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "166e348ed61b69b4eba8e0f284c4120fe28cb83bb0c8cac74bd894ace231cac7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84ae777fa4e1ac39c69dd5e6bb85254cb4298759cfa290a256a5d0c55144d2ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_connector_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35af2e93d64674305e68f8b081260a347aaa507640a634d2262a36981e0b6926",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cloudmanager_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90df6c82602c4ef95ac785cf7e39ff27276ccc34492a367a37911d9e3de401a4",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cba95d18c5b39c6f49714eacf1ac77452c2e32fa087c03cf01aacd19ae597b0f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/builtins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc3268366a078318126a00095e160b71ce50940415c07b5cbeed3e99b09b5777",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_netapp_module_open.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "781ebe6901008b71e0d0845e8b17c67634cd5169f0153d287fb57cde90289637",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba96ce33232f13bd447b2fb68b0fa9a69d35885b6baf79fd5c0df5a0b2a64672",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_netapp_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62f6297e685f7f5856ff20d24394a42a3c8e63a4f59cb0aa8d60fd7cce7b76f1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7722c49e69cfb0615f3ed6f73923492b542063d87c855a742514d53c5d950e5f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8466aaf93284df6abe33f8bdc8c84c2a41bc639c828567133c70f0652e9e87f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22e5e9c0ebb9656ab518bfd5330bc716c9a740f2a06454e0938581634e255b79",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6333f22900738d4bc76a7f262f958e2ad97503906d6bf84b1a9931ab4f3f1972",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9952d7009e407b1f226ffaa0b1c52de283840ecbae8eff1da93c11182f9b033",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1c34a75b6274d58213992c304c40273e9d3eef56f2ae9eccea5f51924228676",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b48c2698d1e0cfb303096ba23faa8a6ca3853c903043ff328ab07d1d7f2858",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c33120714b80513bd39534ff3f5089b4bf2c4001279027c8f5cd8379378ebae",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b274d85e9ec933868cde835d78003d801ef0aabf029759caf5e5ea94104c7afa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f7d00007858264d38bf6ede76669d56fe398eeb0c3faad91ebedb097015bfa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8bdc0f62d3a106aea51e251716cf78b2de46750dd1371242dcffcd10dd16cb5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a671c7291d57ece9aee85657a17037feafc5bc8d3d05861036107dd0662fed9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8528fdf73b6f007dcaf41c7babdadccf6ccacf92c986d26f3f5ebb3c88d42da",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements-azure.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "551b05366d802821dc9869616601ddb67ffd2a9231aab37f32f4812ca81afcb0",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec1875745063d448e6d9ad63042abac6f55d1e627f1237303b1944cec7c38bdc",
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01b491a2a0634f29873f6063d5badbae8c3643209884a000db8610b80f44dd43",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4205.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66baa4e18249a32876ebe811915dbe67ef1ed0bef7c2f139b6de2252b55e739b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4065.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "094ddd1061505ce3f998eb6e88b6711261ee2aa27cedd49567010fb1ca37888e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4136.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e31cf34c68e72a5a332e9e072356a3a9a7b1ec6ecbecb3a0348f925d076ec8d8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4458.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c43e507a39aa3f9efa2bb17c08dbb4e7698691709e36b344ffbb2f3e0eb9cd67",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4321.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b98d00d32fa143d99acf45492fa9bf2a703c6e0c0bac80cdafc9002f7db3bff",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4264.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "890bd065b20b9c677f72c820d2eae695d93682cfd8cd7e341bc44b1ecf0fadea",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5527.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4cea8b8c57f922c325a2562afe212b36c163a365a3067797972491a31f1dd8d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3975.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d054ac4f644b1df01315550caf3147bd203e40eb8b89719dafcc198b9baa155",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3922.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77324f4e6a616a46a6ebe8c946f460402085308a341e4c00c8ec3007784470cf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4567.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2731d533d63daa8869a1b08950b623b590fb85f84c6a4af02b37e6153c861e8b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5252.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e4c96f10c645cc8c6f90590c19b843af8a92f14cbe0c48b9e55417bd934b330",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3909.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d7b5abb05c691646e6f19d2518a5cb45a8627d53a8699eb01b0ceb0a14eafd1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4327.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95c492d780e4b8391ca5038c4a81ea1b785d97597df20b44135b5de24e89c482",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3948.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e49f738c3d4bf82c3e7aa8cb0a24e048d0604c4ad42cd859cad0ba7e997370b9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4223.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d2769624054e9fa53590b3d9e6ad80d77883c6f2c90032925439c83c7ac7084",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4281.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0f401fcd9e3352e374cc9896814fff575f5cce759b4cb1cba960c16e35139b3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3913.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82b14dfb837c6a93e4cd4607f5d85d2d306e886f307a0db2d358c79fd41c2385",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3912.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fdc080b861bdf15100f087943826a3e90d89305ac9e08bd003edbd3d689215f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4500.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1442536d70262642ad0679f123831bde8e1c55d130e1becbaef18b0d79fda382",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5307.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f7d721907b0e0c0852f8f72af7f03be88cb20472e0c69ef972b9b2d48af61fb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4516.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43fe4986b1e6e4ff2645327952d9f544a0991fca979abc019d227a25c3614e52",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5002.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6ae72782a58aeb83ee733b05d3c4f203212aea971b2dbf662e5dc0bfde7e162",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5452.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4f04c0c2b957701f044b1c7368fc09fbbab47df39cf7e684553c9e67c2e41eb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5540.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6a3e77590b685362b8938b251e72f1b64e9b15a14bb146fafb21dd31b1a1fa1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5151.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d30b752e9d852e98255c44780372c40d9b3e2d437c1998dd4fcb857823127cdc",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5472.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcc0d28fc6dba864e49c460f8186503a813185e295c3afeb70f6876fcbf79cf5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5366.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d69efccb38cd8d8a90203194a7512a52f2992e8af2e7aed5eab6961dcbffcdea",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3965.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "970ab4ad82b69749b11887cd16c8124ff22cc0da582e35a2a7ce166ecb9ef5f0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3911.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e76a7d85e3758bb4dc408749d27c941ef40b50798e7b828cdaadf56f8210ab2a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3946.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af2f9ff445807a9fb42a800476523197177f1672df2e49c8046190f3f2b0d43b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3803.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0003d0beb209eeae803209ba732ee929d512296f21c9bef59414fa19cb90c62f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4542.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95100f5e2baa68ce684750f9c94e46856fc540058ebb5d6be9251feb949bbfee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4647.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87387e0deec568b28ecbfb30f940db75d167e6cd64e243636853a2a0e18fd380",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4201.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3a29e73f83aba3c999cef2cac0f6ca68f782dd1b329fa0da0b599c2d1955731",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3985.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd94e353c355e752fa0aad037737434e0ed7f75baa28807262acfe2a82ae672d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3984.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "517de92be276ebc5969fbe0607ec28b2c3beef28277ed801d0d3e79f173abd39",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4200.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "691f2cee49e9b38de09ed9610de8ab36752cb1dbc3ca24c5a668747548de6cdf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4703.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fca33074db70f061a8ade1c912be6ae6a9d547f325f22d01a33bb3fe01dbfa9c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4492.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0a584e4d7b93629917afc8394c6fdb4006cc966acbd202772d1d17ee83c3c31",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4328.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4993359bcd8c731369af85883062d02112c3c1cc70e2b15d8e95bdab2af9312",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3947.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11588e2084d79a358eaa4af53302a36e97c6c1ad9af8c49ea55e60acfac5848b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4386.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a00aa58145d60dd02b2241efc57f5c83e3610aadb0b420e89512b7d2e5aa3e0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3910.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23c0a4cbd8fe7a4792b4f8805235d3f29184264269f5b31fc4f7571a7f156d58",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4105.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d205ac596e3e955783b7f67cb24a366d712a4843caa3445398018ba1f13a8787",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3967.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f325180ed56fc1c530271a04fd115bae3ec32fb3df279f0f08bf40e8324c0c72",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4563.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d784283f7af8b1de73a1d69a4834a59a765cc13843f3ccca096c28001309fb31",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4164.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18bd434037808c9aa267775f708f5b0c256b46661549fb45a77f660ad9c6831d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4021.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b86b7ef5fb28e98b22b7838fd746b4efb3684723e22d7e989559c9f0c9c0a38",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4758.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4254c34a29d4a8e5a86ad03ccac987f2ff39cf2874bad260098fa3c68fe91ec6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5562.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c775744a3c2674be85f94e2d03b65454514f89d102d7419e2b87f7e1ac4b342",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4298.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea8da93d3fad1c5e838daf20aa028258263220f475c19c33b69f57f2eda6013e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4513.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2b56b351170342cd33ef993ee94f5f843a690b43b2ab3251e5a905f5c0421b2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3844.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3f27181c95c1242f5b8e3c91db76276634007240dc007e24fa16617a231fa19",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-3995.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "794848f5493e7b8f795351792f2b0faf3fd26bdec6ca61032d8ccbcc91949c67",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4303.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a9fc2f16a2d6a55c911f584702739fe95f65c6810e58b02a68b44997a798127",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4118.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09bc1877922fbe266d127a44cd170cbaf7c76da75b99d74f8744e68e659bf730",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4358.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d16ee9221c9d89fefecdfa8087fb7674ec6bdbbe26a8290fa44a0862015fb8d3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4820.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "593d4956f27b7d20c8a4a98d966eae48cc5a2b39e4481716bf687c467a0e2a56",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4267.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ede30ad3ca2a74a71971f801f40350fdf75daf02da17d65b8e50c58e2963058",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4271.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "709b16cc1317def4369fd8bd414d4f795de8f64f395f8f60eb6d818dacd5bdee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5437.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e635402bc11724322624fe36b412b5f6fe32a59e1bb998a52b6d6d97e0df6b6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4292.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dbe54f0e1f3106c3de0b49e9775325eae0604bd861995a8470755421c9a7a93",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-4416.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4224db573f34caeeb956c8728eb343a47bc2729d898001a4c6a671b780dae1bf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/DEVOPS-5342.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a50a71e12e405ca5439e4845d7eb4a6cb76c7cde924a9eda4929bb8cdd67b3f3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "394b4a5fd0611f8e0391646242d99fb4d5262aed1a2c673b0e83a35532b3fa81",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be7a40a0d78be9817a8bc07e43867f18d03f5ccee1cb3e89ac265ecf88ae17aa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "346a3fe8d6d060262187ae2c2b4f2fec26cb62bee57056a22bc748d365ae4f21",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69c1d5ad062564e51ea093e92a8a8a53369776964d998bc118fc6f763576cd20",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd3d45d5278e9d4fdc6d2ccf4b4e218199dac954ceda06b15c613020c158e249",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2db808e4f01ee3ea3924ba5adba02a3ee3ed33c5a1540a0a04892bb1ab4fb2f7",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e4ed8909f732d6bd64277ad63b19ba377db566b749048de9fff2834bc88f0d0",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f309598150491c76f6fd83a5da0c0964b7835117b5909128b0d64661c33025fc",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7a9c516b250389edf58a4b047e10b4b5bd3d8e30d2547d00c3846ab63406b49",
+ "format": 1
+ },
+ {
+ "name": "kubectl.sha256",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26fa5d2141ec23edea21153680baee49b5d545bf4fe574301befabf7ca83a025",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/MANIFEST.json b/ansible_collections/netapp/cloudmanager/MANIFEST.json
new file mode 100644
index 000000000..f1768abba
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/MANIFEST.json
@@ -0,0 +1,37 @@
+{
+ "collection_info": {
+ "namespace": "netapp",
+ "name": "cloudmanager",
+ "version": "21.22.0",
+ "authors": [
+ "NetApp Ansible Team <ng-ansibleteam@netapp.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "netapp",
+ "cvo",
+ "cloudmanager",
+ "amazon",
+ "cloud",
+ "storage",
+ "azure",
+ "gcp"
+ ],
+ "description": "Ansible collection to create CloudManager connectors, CVO instances, CVO aggregates, CVO volumes, and more.",
+ "license": [],
+ "license_file": "COPYING",
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/netapp.cloudmanager",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/netapp/",
+ "homepage": "https://netapp.io/configuration-management-and-automation/",
+ "issues": "https://github.com/ansible-collections/netapp.cloudmanager"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d4c630f97d816aaaf8cc83d416b16c9ebd0dc49ed37386ffbf63f18b11813e7",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/README.md b/ansible_collections/netapp/cloudmanager/README.md
new file mode 100644
index 000000000..f6e794a46
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/README.md
@@ -0,0 +1,262 @@
+[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/netapp/cloudmanager/index.html)
+![example workflow](https://github.com/ansible-collections/netapp.cloudmanager/actions/workflows/main.yml/badge.svg)
+[![codecov](https://codecov.io/gh/ansible-collections/netapp.cloudmanager/branch/main/graph/badge.svg?token=weBYkksxSi)](https://codecov.io/gh/ansible-collections/netapp.cloudmanager)
+[![Discord](https://img.shields.io/discord/855068651522490400)](https://discord.gg/NetApp)
+# Ansible Collection - netapp.cloudmanager
+
+Copyright (c) 2022 NetApp, Inc. All rights reserved.
+Specifications subject to change without notice.
+
+This collection requires python 3.5 or better.
+
+# Installation
+```bash
+ansible-galaxy collection install netapp.cloudmanager
+```
+To use this collection, add the following to the top of your playbook:
+```
+collections:
+ - netapp.cloudmanager
+```
+# Requirements
+- ansible version >= 2.9
+- requests >= 2.20
+- python version >= '3.5'
+
+# Module documentation
+https://docs.ansible.com/ansible/devel/collections/netapp/cloudmanager/
+
+# Need help
+Join our [Discord](https://discord.gg/NetApp) and look for our #ansible channel.
+
+# Code of Conduct
+This collection follows the [Ansible project's Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
+
+# Documentation
+https://github.com/ansible-collections/netapp/wiki
+
+# Release Notes
+
+## 21.22.0
+ - Add `svm_name` option in AWS, AZURE and GCP CVO for creation and update.
+
+## 21.21.0
+
+### Minor Changes
+ - na_cloudmanager_connector_azure - expose connector managed system identity principal_id to perform role assignment.
+
+### New Options
+ - Add `availability_zone_node1` and `availability_zone_node2` options in CVO Azure HA on the location configuration.
+ - Add new `storage_type` value Premium_ZRS
+
+## 21.20.1
+
+### Bug Fixes
+ - new meta/execution-environment.yml is failing ansible-builder sanitize step.
+
+## 21.20.0
+
+### New Options
+ - Add `availability_zone` option in CVO Azure on the location configuration.
+ - Add `cluster_key_pair_name` option in CVO AWS for SSH authentication key pair method.
+ - Add `subnet_path` option in CVO GCP.
+
+### Bug Fixes
+ - Fix the `machine_type` default value in the connector GCP.
+
+### Minor Changes
+ - na_cloudmanager_volume - Support AWS FsxN working environment
+
+## 21.19.0
+
+### Minor Changes
+ - Support writing_speed_state modification for AWS, AZURE and GCP CVOs.
+
+## 21.18.0
+ - na_cloudmanager_connector_azure - support full subnet_id and vnet_id
+ - Support ``writing_speed_state`` modification for AWS, AZURE and GCP CVOs.
+
+## 21.17.0
+
+### Minor Changes
+ - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager.
+ - Support ``license_type`` modification for AWS, AZURE and GCP CVOs.
+
+### New Options
+ - na_cloudmanager_connector_azure - Support user defined ``storage_account``. The storage account can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``.
+ - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``.
+
+## 21.16.0
+
+### Bug Fixes
+ - na_cloudmanager_volume - Add check when volume is capacity tiered.
+ - na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector.
+
+### Minor Changes
+ - na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed.
+
+## 21.15.0
+
+### Minor Changes
+ - Add the description of the client_id based on the cloudmanager UI.
+ - Update ``license_type`` and ``capacity_package_name`` default values on capacity based license.
+
+## 21.14.0
+
+### Minor Changes
+ - na_cloudmanager_snapmirror - add AWS FSx to snapmirror.
+
+### Bug Fixes
+ - CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on `instance_type` change.
+
+## 21.13.0
+
+### New Modules
+ - na_cloudmanager_aws_fsx - NetApp AWS FSX
+
+### Minor Changes
+ - na_cloudmanager_connector_aws - make the module idempotent for create and delete.
+ - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete.
+ - na_cloudmanager_connector_aws - report client_id if connector already exists.
+ - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info.
+ - Add ONTAP image upgrade feature for AWS, AZURE and GCP CVOs. Add ``upgrade_ontap_version`` to indicate if upgrade ONTAP is needed. It only can be used when ``use_latest_version`` is false and ``ontap_version`` is a specific version.
+ - Add instance_type update feature for AWS, AZURE and GCP CVOs.
+ - na_cloudmanager_volume - Add ``tiering_policy`` and ``snapshot_policy_name`` modification, and report error if the properties cannot be changed.
+
+### Bug Fixes
+ - na_cloudmanager_cvo_gcp - handle extra auto-gen GCP labels to fix `gcp_labels` update failure.
+ - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true.
+
+## 21.12.1
+
+### Bug Fixes
+ - na_cloudmanager_connector_aws - fix default ami not found in the region on resource file.
+ - na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info".
+
+## 21.12.0
+
+### Minor Changes
+ - Handle extra azure_tag on AZURE CVO and extra gcp_labels on GCP CVO HA on modification. gcp_labels modification on GCP CVO does not support remove labels.
+ - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
+
+### Bug Fixes
+ - na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed.
+ - Fix cannot find working environment if `working_environment_name` is provided.
+
+## 21.11.0
+
+### New Options
+ - Adding new parameter `capacity_package_name` for all CVOs creation with capacity based license type capacity-paygo or ha-capacity-paygo for HA.
+
+### Minor Changes
+ - na_cloudmanager_connector_gcp - make the module idempotent for create and delete.
+ - na_cloudmanager_connector_gcp - automatically fetch client_id for delete.
+ - na_cloudmanager_connector_gcp - report client_id if connector already exists.
+ - all modules - better error reporting if ``refresh_token`` is not valid.
+
+### Bug Fixes
+ - na_cloudmanager_connector_gcp - typeError when using proxy certificates.
+
+## 21.10.0
+
+### Minor Changes
+ - Adding support update on `svm_password`, `tier_level`, `aws_tag`, `azure_tag` and `gcp_labels` for all CVOs. Only these parameters will be modified on the existing CVOs.
+
+### Bug Fixes
+ - na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation.
+
+### New Options
+ - Adding new parameter `ha_enable_https` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false.
+ - Adding new parameters `kms_key_id` and `kms_key_arn` as AWS encryption parameters for AWS CVO encryption.
+ - Adding new parameter `azure_encryption_parameters` for AZURE CVO encryption.
+ - Adding new parameter `gcp_encryption_parameters` for GCP CVO encryption.
+
+## 21.9.0
+
+### New Options
+ - Adding selflink support on CVO GCP params: `subnet_id`, `vpc0_node_and_data_connectivity`, `vpc1_cluster_connectivity`, `vpc2_ha_connectivity`, `vpc3_data_replication`, `subnet0_node_and_data_connectivity`, `subnet1_cluster_connectivity`, `subnet2_ha_connectivity`, and `subnet3_data_replication`.
+ - Adding pd-balanced support on ``gcp_volume_type`` CVO GCP and ``provider_volume_type`` for na_cloudmanager_snapmirror and na_cloudmanager_volume.
+
+### Bug Fixes
+ - Change `virtual_machine_size` default value to Standard_DS3_v2.
+
+## 21.8.0
+
+### New Options
+ - Adding stage environment to all modules in cloudmanager.
+ - Adding service account support on API operations in cloudmanager: `sa_client_id` and `sa_secret_key`. `refresh_token` will be ignored if service account information is provided.
+
+### Bug Fixes
+ - Accept client_id end with or without 'clients'.
+
+## 21.7.0
+
+### New Options
+ - na_cloudmanager_cvo_aws: Support one new ebs_volume_type gp3.
+ - Adding stage environment to all modules in cloudmanager.
+ - na_cloudmanager_volume: Add `aggregate_name` support on volume creation.
+ - na_cloudmanager_cvo_aws: Support one new `ebs_volume_type` gp3.
+ - na_cloudmanager_connector_azure: Add `subnet_name` as aliases of `subnet_id`, `vnet_name` as aliases of `vnet_id`.
+ - na_cloudmanager_aggregate - Add ``provider_volume_type`` gp3 support.
+ - na_cloudmanager_volume - Add ``provider_volume_type`` gp3 support.
+ - na_cloudmanager_snapmirror - Add ``provider_volume_type`` gp3 support.
+
+### Bug Fixes
+ - na_cloudmanager_aggregate: Improve error message.
+ - na_cloudmanager_cvo_gcp: Apply `network_project_id` on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication.
+ - na_cloudmanager_connector_gcp: rename option `service_account_email` and `service_account_path` to `gcp_service_account_email` and `gcp_service_account_path` respectively.
+ - na_cloudmanager_connector_azure: Fix KeyError client_id.
+ - na_cloudmanager_nss_account: Improve error message.
+ - na_cloudmanager_volume: Improve error message.
+
+## 21.6.0
+
+### New Modules
+ - na_cloudmanager_snapmirror: Create or Delete snapmirror on Cloud Manager.
+
+### Bug Fixes
+ - na_cloudmanager_connector_gcp: Make client_id as optional.
+ - na_cloudmanager_cvo_gcp: Change ``vpc_id`` from optional to required.
+
+## 21.5.1
+
+### Bug Fixes
+ - na_cloudmanager_cifs_server: Fix incorrect API call when is_workgroup is true.
+ - na_cloudmanager_connector_azure: Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None.
+ - na_cloudmanager_connector_azure: Fix wrong example on the document and update account_id is required field on deletion.
+
+## 21.5.0
+
+### New Options
+ - na_cloudmanager_connector_aws: Return newly created Azure client ID in cloud manager, instance ID and account ID. New option `proxy_certificates`.
+ - na_cloudmanager_cvo_aws: Return newly created AWS working_environment_id.
+ - na_cloudmanager_cvo_azure: Return newly created AZURE working_environment_id.
+ - na_cloudmanager_cvo_gcp: Return newly created GCP working_environment_id.
+
+### Bug Fixes
+ - na_cloudmanager_cvo_aws: Fix incorrect placement of platformSerialNumber in the resulting json structure.
+
+## 21.4.0
+
+### Module documentation changes
+ - Remove the period at the end of the line on short_description.
+ - Add period at the end of the names in examples.
+ - Add notes mentioning support check_mode.
+
+### New Modules
+ - na_cloudmanager_connector_azure: Create or delete Cloud Manager connector for Azure.
+ - na_cloudmanager_cvo_azure: Create or delete Cloud Manager CVO for AZURE for both single and HA.
+ - na_cloudmanager_info: Gather Cloud Manager subset information using REST APIs. Support for subsets `working_environments_info`, `aggregates_info`, `accounts_info`.
+ - na_cloudmanager_connector_gcp: Create or delete Cloud Manager connector for GCP.
+ - na_cloudmanager_cvo_gcp: Create or delete Cloud Manager CVO for GCP for both single and HA.
+
+## 21.3.0
+
+### New Modules
+ - na_cloudmanager_aggregate: Create or delete an aggregate on Cloud Volumes ONTAP, or add disks on an aggregate.
+ - na_cloudmanager_cifs_server: Create or delete CIFS server for Cloud Volumes ONTAP.
+ - na_cloudmanager_connector_aws: Create or delete Cloud Manager connector for AWS.
+ - na_cloudmanager_cvo_aws: Create or delete Cloud Manager CVO for AWS for both single and HA.
+ - na_cloudmanager_nss_account: Create or delete a nss account on Cloud Manager.
+ - na_cloudmanager_volume: Create, modify or delete a volume on Cloud Volumes ONTAP.
+
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml b/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..11c23112f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/.plugin-cache.yaml
@@ -0,0 +1,81 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup: {}
+ module:
+ na_cloudmanager_aggregate:
+ description: NetApp Cloud Manager Aggregate
+ name: na_cloudmanager_aggregate
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ na_cloudmanager_aws_fsx:
+ description: Cloud ONTAP file system(FSx) in AWS
+ name: na_cloudmanager_aws_fsx
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.13.0
+ na_cloudmanager_cifs_server:
+ description: NetApp Cloud Manager cifs server
+ name: na_cloudmanager_cifs_server
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ na_cloudmanager_connector_aws:
+ description: NetApp Cloud Manager connector for AWS
+ name: na_cloudmanager_connector_aws
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ na_cloudmanager_connector_azure:
+ description: NetApp Cloud Manager connector for Azure.
+ name: na_cloudmanager_connector_azure
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.4.0
+ na_cloudmanager_connector_gcp:
+ description: NetApp Cloud Manager connector for GCP.
+ name: na_cloudmanager_connector_gcp
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.4.0
+ na_cloudmanager_cvo_aws:
+ description: NetApp Cloud Manager CVO for AWS
+ name: na_cloudmanager_cvo_aws
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ na_cloudmanager_cvo_azure:
+ description: NetApp Cloud Manager CVO/working environment in single or HA mode
+ for Azure.
+ name: na_cloudmanager_cvo_azure
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.4.0
+ na_cloudmanager_cvo_gcp:
+ description: NetApp Cloud Manager CVO for GCP
+ name: na_cloudmanager_cvo_gcp
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.4.0
+ na_cloudmanager_info:
+ description: NetApp Cloud Manager info
+ name: na_cloudmanager_info
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.4.0
+ na_cloudmanager_nss_account:
+ description: NetApp Cloud Manager nss account
+ name: na_cloudmanager_nss_account
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ na_cloudmanager_snapmirror:
+ description: NetApp Cloud Manager SnapMirror
+ name: na_cloudmanager_snapmirror
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.6.0
+ na_cloudmanager_volume:
+ description: NetApp Cloud Manager volume
+ name: na_cloudmanager_volume
+ namespace: private.var.folders.zv.gm9l2n2n30g2k5tdmny65l_r0000gp.T.antsibull-changelogvn56yfgu.collections.ansible_collections.netapp.cloudmanager.plugins.modules
+ version_added: 21.3.0
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 21.19.0
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml b/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml
new file mode 100644
index 000000000..1160305d3
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/changelog.yaml
@@ -0,0 +1,374 @@
+ancestor: null
+releases:
+ 21.10.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation
+ minor_changes:
+ - Only these parameters will be modified on the existing CVOs. svm_password
+ will be updated on each run.
+ - na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and
+ aws_tag.
+ - na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn``
+ as AWS encryption parameters to support AWS CVO encryption
+ - na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO
+ to enable the HTTPS connection from CVO to storage accounts. This can impact
+ write performance. The default is false.
+ - na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and
+ azure_tag.
+ - na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters``
+ to support AZURE CVO encryption
+ - na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and
+ gcp_labels.
+ - na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters``
+ to support GCP CVO encryption
+ fragments:
+ - DEVOPS-4065.yaml
+ - DEVOPS-4136.yaml
+ - DEVOPS-4164.yaml
+ - DEVOPS-4200.yaml
+ release_date: '2021-09-01'
+ 21.11.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_connector_gcp - TypeError when using proxy certificates.
+ minor_changes:
+ - Add CVO modification unit tests
+ - Adding new parameter ``capacity_package_name`` for all CVOs creation with
+ capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA.
+ - all modules - better error reporting if refresh_token is not valid.
+ - na_cloudmanager_connector_gcp - automatically fetch client_id for delete.
+ - na_cloudmanager_connector_gcp - make the module idempotent for create and
+ delete.
+ - na_cloudmanager_connector_gcp - report client_id if connector already exists.
+ - na_cloudmanager_cvo_aws - Add unit tests for capacity based license support.
+ - na_cloudmanager_cvo_azure - Add unit tests for capacity based license support.
+ - na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support
+ and delete cvo.
+ - netapp.py - improve error handling with error content.
+ fragments:
+ - DEVOPS-4267.yaml
+ - DEVOPS-4292.yaml
+ - DEVOPS-4303.yaml
+ - DEVOPS-4321.yaml
+ - DEVOPS-4327.yaml
+ release_date: '2021-10-06'
+ 21.12.0:
+ changes:
+ bugfixes:
+ - Fix cannot find working environment if ``working_environment_name`` is provided
+ minor_changes:
+ - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
+ - na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance
+ - na_cloudmanager_cvo_gcp - Add extra label handling for HA and only allow add
+ new labels on gcp_labels
+ - na_cloudmanager_snapmirror - working environment get information api not working
+ for onprem is fixed
+ fragments:
+ - DEVOPS-4328.yaml
+ - DEVOPS-4358.yaml
+ - DEVOPS-4386.yaml
+ - DEVOPS-4416.yaml
+ release_date: '2021-11-03'
+ 21.12.1:
+ changes:
+ bugfixes:
+ - na_cloudmanager_connector_aws - Fix default ami not based on the region in
+ resource file
+ - na_cloudmanager_snapmirror - report actual error rather than None with "Error
+ getting destination info".
+ fragments:
+ - DEVOPS-4298.yaml
+ - DEVOPS-4458.yaml
+ release_date: '2021-11-23'
+ 21.13.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent
+ update ``gcp_labels`` failure.
+ minor_changes:
+ - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and
+ GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true.
+ - Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version``
+ is true and ``ontap_version`` is provided with a specific version. ``use_latest_version``
+ has to be false.
+ - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id
+ for delete.
+ - na_cloudmanager_connector_aws - make the module idempotent for create and
+ delete.
+ - na_cloudmanager_connector_aws - report client_id and instance_id if connector
+ already exists.
+ - na_cloudmanager_cvo_aws - Support instance_type update
+ - na_cloudmanager_cvo_azure - Support instance_type update
+ - na_cloudmanager_cvo_gcp - Support instance_type update
+ - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info
+ - na_cloudmanager_volume - Report error if the volume properties cannot be modified.
+ Add support ``tiering_policy`` and ``snapshot_policy_name`` modification.
+ fragments:
+ - DEVOPS-4264.yaml
+ - DEVOPS-4271.yaml
+ - DEVOPS-4492.yaml
+ - DEVOPS-4500.yaml
+ - DEVOPS-4513.yaml
+ - DEVOPS-4542.yaml
+ modules:
+ - description: Cloud ONTAP file system(FSX) in AWS
+ name: na_cloudmanager_aws_fsx
+ namespace: ''
+ release_date: '2022-01-12'
+ 21.14.0:
+ changes:
+ bugfixes:
+ - CVO working environment clusterProperties is deprecated. Make changes accordingly.
+ Add CVO update status check on ``instance_type`` change.
+ minor_changes:
+ - na_cloudmanager_snapmirror - Add FSX to snapmirror.
+ fragments:
+ - DEVOPS-4516.yaml
+ - DEVOPS-4563.yaml
+ release_date: '2022-02-02'
+ 21.15.0:
+ changes:
+ minor_changes:
+ - Add the description of client_id based on the cloudmanager UI.
+ - Set license_type default value 'capacity-paygo' for single node 'ha-capacity-paygo'
+ for HA and 'capacity_package_name' value 'Essential'
+ fragments:
+ - DEVOPS-4647.yaml
+ - DEVOPS-4703.yaml
+ release_date: '2022-03-02'
+ 21.16.0:
+ changes:
+ bugfixes:
+ - Add check when volume is capacity tiered.
+ - na_cloudmanager_connector_azure - Fix string formatting error when deleting
+ the connector.
+ minor_changes:
+ - na_cloudmanager_connector_gcp - when using the user application default credential
+ authentication by running the command gcloud auth application-default login,
+ ``gcp_service_account_path`` is not needed.
+ fragments:
+ - DEVOPS-4567.yaml
+ - DEVOPS-4758.yaml
+ - DEVOPS-4820.yaml
+ release_date: '2022-04-05'
+ 21.17.0:
+ changes:
+ minor_changes:
+ - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters
+ ``import_file_system`` and ``file_system_id``.
+ - na_cloudmanager_connector_azure - Support user defined ``storage_account``
+ name. The ``storage_account`` can be created automatically. When ``storage_account``
+ is not set, the name is constructed by appending 'sa' to the connector ``name``.
+ - na_cloudmanager_cvo_aws - Support license_type update
+ - na_cloudmanager_cvo_azure - Support license_type update
+ - na_cloudmanager_cvo_gcp - Support license_type update
+ fragments:
+ - DEVOPS-4223.yaml
+ - DEVOPS-4281.yaml
+ - DEVOPS-5002.yaml
+ release_date: '2022-05-04'
+ 21.18.0:
+ changes:
+ minor_changes:
+ - na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``.
+ fragments:
+ - DEVOPS-5151.yaml
+ release_date: '2022-06-09'
+ 21.19.0:
+ changes:
+ minor_changes:
+ - Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs.
+ fragments:
+ - DEVOPS-5252.yaml
+ release_date: '2022-08-03'
+ 21.20.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP
+ connector.
+ minor_changes:
+ - Add ``availability_zone`` option in CVO Azure on the location configuration.
+ - Add ``subnet_path`` option in CVO GCP.
+ - na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support
+ SSH authentication method key pair.
+ - na_cloudmanager_volume - Support AWS FsxN working environment.
+ fragments:
+ - DEVOPS-5307.yaml
+ - DEVOPS-5342.yaml
+ - DEVOPS-5366.yaml
+ - DEVOPS-5437.yaml
+ - DEVOPS-5472.yaml
+ release_date: '2022-10-05'
+ 21.20.1:
+ changes:
+ bugfixes:
+ - new meta/execution-environment.yml is failing ansible-builder sanitize step.
+ fragments:
+ - DEVOPS-5540.yaml
+ release_date: '2022-10-07'
+ 21.21.0:
+ changes:
+ minor_changes:
+ - na_cloudmanager_connector_azure - expose connector managed system identity
+ principal_id to perform role assignment
+ - na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS
+ - na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and
+ ``availability_zone_node2`` for CVO Azure HA location
+ fragments:
+ - DEVOPS-5527.yaml
+ - DEVOPS-5562.yaml
+ release_date: '2022-11-02'
+ 21.22.0:
+ changes:
+ minor_changes:
+ - Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update.
+ fragments:
+ - DEVOPS-5452.yaml
+ release_date: '2022-12-07'
+ 21.3.0:
+ modules:
+ - description: NetApp Cloud Manager Aggregate
+ name: na_cloudmanager_aggregate
+ namespace: ''
+ - description: NetApp Cloud Manager cifs server
+ name: na_cloudmanager_cifs_server
+ namespace: ''
+ - description: NetApp Cloud Manager connector for AWS
+ name: na_cloudmanager_connector_aws
+ namespace: ''
+ - description: NetApp Cloud Manager CVO for AWS
+ name: na_cloudmanager_cvo_aws
+ namespace: ''
+ - description: NetApp Cloud Manager nss account
+ name: na_cloudmanager_nss_account
+ namespace: ''
+ - description: NetApp Cloud Manager volume
+ name: na_cloudmanager_volume
+ namespace: ''
+ release_date: '2021-03-03'
+ 21.4.0:
+ modules:
+ - description: NetApp Cloud Manager connector for Azure.
+ name: na_cloudmanager_connector_azure
+ namespace: ''
+ - description: NetApp Cloud Manager connector for GCP.
+ name: na_cloudmanager_connector_gcp
+ namespace: ''
+ - description: NetApp Cloud Manager CVO/working environment in single or HA mode
+ for Azure.
+ name: na_cloudmanager_cvo_azure
+ namespace: ''
+ - description: NetApp Cloud Manager info
+ name: na_cloudmanager_info
+ namespace: ''
+ release_date: '2021-04-07'
+ 21.5.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber
+ in the resulting json structure
+ minor_changes:
+ - na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud
+ manager, instance ID and account ID. New option ``proxy_certificates``.
+ - na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id.
+ - na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id.
+ - na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id.
+ fragments:
+ - DEVOPS-3803.yaml
+ - DEVOPS-3844.yaml
+ - DEVOPS-3922.yaml
+ release_date: '2021-04-21'
+ 21.6.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is
+ true
+ - na_cloudmanager_connector_azure - Change client_id as optional
+ - na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError.
+ Parameter 'Deployment.properties' can not be None.
+ - na_cloudmanager_connector_azure - Fix wrong example on the document and update
+ account_id is required field on deletion.
+ - na_cloudmanager_cvo_gcp - Change vpc_id from optional to required.
+ fragments:
+ - DEVOPS-3910.yaml
+ - DEVOPS-3911.yaml
+ - DEVOPS-3913.yaml
+ - DEVOPS-3946.yaml
+ - DEVOPS-3948.yaml
+ modules:
+ - description: NetApp Cloud Manager SnapMirror
+ name: na_cloudmanager_snapmirror
+ namespace: ''
+ release_date: '2021-05-06'
+ 21.7.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_aggregate - Improve error message
+ - na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id,
+ vnet_name as aliases of vnet_id.
+ - na_cloudmanager_connector_azure - Fix KeyError client_id
+ - na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity,
+ vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity,
+ subnet2_ha_connectivity, subnet3_data_replication
+ - na_cloudmanager_nss_account - Improve error message
+ - na_cloudmanager_volume - Improve error message
+ minor_changes:
+ - na_cloudmanager_aggregate - Add provider_volume_type gp3 support.
+ - na_cloudmanager_connector_gcp - rename option ``service_account_email`` and
+ ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path``
+ respectively.
+ - na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support.
+ - na_cloudmanager_snapmirror - Add provider_volume_type gp3 support.
+ - na_cloudmanager_volume - Add aggregate_name support on volume creation.
+ - na_cloudmanager_volume - Add provider_volume_type gp3 support.
+ fragments:
+ - DEVOPS-3909.yaml
+ - DEVOPS-3912.yaml
+ - DEVOPS-3947.yaml
+ - DEVOPS-3967.yaml
+ - DEVOPS-3975.yaml
+ - DEVOPS-3984.yaml
+ - DEVOPS-3985.yaml
+ - DEVOPS-3995.yaml
+ release_date: '2021-06-03'
+ 21.8.0:
+ changes:
+ bugfixes:
+ - na_cloudmanager_aggregate - accept client_id end with or without 'clients'
+ - na_cloudmanager_cifs_server - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_aws - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_azure - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_gcp - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_aws - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_azure - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients'
+ - na_cloudmanager_info - accept client_id end with or without 'clients'
+ - na_cloudmanager_nss_account - accept client_id end with or without 'clients'
+ - na_cloudmanager_snapmirror - accept client_id end with or without 'clients'
+ - na_cloudmanager_volume - accept client_id end with or without 'clients'
+ major_changes:
+ - Adding stage environment to all modules in cloudmanager
+ minor_changes:
+ - na_cloudmanager - Support service account with new options ``sa_client_id``
+ and ``sa_secret_key`` to use for API operations.
+ fragments:
+ - DEVOPS-3965.yaml
+ - DEVOPS-4021.yaml
+ - DEVOPS-4105.yaml
+ release_date: '2021-07-14'
+ 21.9.0:
+ changes:
+ minor_changes:
+ - na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP,
+ ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume.
+ - na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size``
+ to Standard_DS3_v2.
+ - na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity,
+ vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity,
+ subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication.
+ fragments:
+ - DEVOPS-4118.yaml
+ - DEVOPS-4201.yaml
+ - DEVOPS-4205.yaml
+ release_date: '2021-08-04'
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/config.yaml b/ansible_collections/netapp/cloudmanager/changelogs/config.yaml
new file mode 100644
index 000000000..d0ffb959e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: true
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: NetApp CloudManager Collection
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml
new file mode 100644
index 000000000..af0b39ae9
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3803.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_aws - Return newly created Azure client ID in cloud manager, instance ID and account ID. New option ``proxy_certificates``. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml
new file mode 100644
index 000000000..e36f3ffb6
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3844.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Return newly created AWS working_environment_id.
+ - na_cloudmanager_cvo_azure - Return newly created AZURE working_environment_id.
+ - na_cloudmanager_cvo_gcp - Return newly created GCP working_environment_id. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml
new file mode 100644
index 000000000..6336f1adc
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3909.yaml
@@ -0,0 +1,4 @@
+bugfixes:
+ - na_cloudmanager_aggregate - Improve error message
+ - na_cloudmanager_nss_account - Improve error message
+ - na_cloudmanager_volume - Improve error message
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml
new file mode 100644
index 000000000..0e9dd6390
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3910.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_cifs_server - Fix incorrect API call when is_workgroup is true \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml
new file mode 100644
index 000000000..a4ffd6b1e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3911.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Fix wrong example on the document and update account_id is required field on deletion. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml
new file mode 100644
index 000000000..f8ca958f3
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3912.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Add subnet_name as aliases of subnet_id, vnet_name as aliases of vnet_id.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml
new file mode 100644
index 000000000..ca13f3a6f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3913.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Fix python error - msrest.exceptions.ValidationError. Parameter 'Deployment.properties' can not be None. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml
new file mode 100644
index 000000000..37cfe5d57
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3922.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_cvo_aws - Fix incorrect placement of platformSerialNumber in the resulting json structure
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml
new file mode 100644
index 000000000..0dd5745e7
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3946.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Change client_id as optional \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml
new file mode 100644
index 000000000..320552cb3
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3947.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_gcp - rename option ``service_account_email`` and ``service_account_path`` to ``gcp_service_account_email`` and ``gcp_service_account_path`` respectively. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml
new file mode 100644
index 000000000..3e5dd7522
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3948.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_cvo_gcp - Change vpc_id from optional to required.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml
new file mode 100644
index 000000000..17e1435eb
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3965.yaml
@@ -0,0 +1,2 @@
+major_changes:
+ - Adding stage environment to all modules in cloudmanager \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml
new file mode 100644
index 000000000..ccd3e01c0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3967.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Add ebs_volume_type gp3 support. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml
new file mode 100644
index 000000000..8f970c8ee
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3975.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_volume - Add aggregate_name support on volume creation. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml
new file mode 100644
index 000000000..b08225316
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3984.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_aggregate - Add provider_volume_type gp3 support.
+ - na_cloudmanager_volume - Add provider_volume_type gp3 support.
+ - na_cloudmanager_snapmirror - Add provider_volume_type gp3 support. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml
new file mode 100644
index 000000000..cfb6b4289
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3985.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_cvo_gcp - Apply network_project_id check on vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet1_cluster_connectivity, subnet2_ha_connectivity, subnet3_data_replication \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml
new file mode 100644
index 000000000..76f26a264
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-3995.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Fix KeyError client_id \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml
new file mode 100644
index 000000000..6e3903967
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4021.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager - Support service account with new options ``sa_client_id`` and ``sa_secret_key`` to use for API operations. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml
new file mode 100644
index 000000000..e05522215
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4065.yaml
@@ -0,0 +1,5 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Support update on svm_password, tier_level, and aws_tag.
+ - na_cloudmanager_cvo_azure - Support update on svm_password, tier_level, and azure_tag.
+ - na_cloudmanager_cvo_gcp - Support update on svm_password, tier_level, and gcp_labels.
+ - Only these parameters will be modified on the existing CVOs. svm_password will be updated on each run. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml
new file mode 100644
index 000000000..91ee46370
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4105.yaml
@@ -0,0 +1,13 @@
+bugfixes:
+ - na_cloudmanager_aggregate - accept client_id end with or without 'clients'
+ - na_cloudmanager_cifs_server - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_aws - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_azure - accept client_id end with or without 'clients'
+ - na_cloudmanager_connector_gcp - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_aws - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_azure - accept client_id end with or without 'clients'
+ - na_cloudmanager_cvo_gcp - accept client_id end with or without 'clients'
+ - na_cloudmanager_info - accept client_id end with or without 'clients'
+ - na_cloudmanager_nss_account - accept client_id end with or without 'clients'
+ - na_cloudmanager_snapmirror - accept client_id end with or without 'clients'
+ - na_cloudmanager_volume - accept client_id end with or without 'clients' \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml
new file mode 100644
index 000000000..e176f9574
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4118.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_cvo_gcp - Add selflink support on subnet_id, vpc0_node_and_data_connectivity, vpc1_cluster_connectivity, vpc2_ha_connectivity, vpc3_data_replication, subnet0_node_and_data_connectivity, subnet1_cluster_connectivity, subnet2_ha_connectivity, and subnet3_data_replication. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml
new file mode 100644
index 000000000..8d5494695
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4136.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_cvo_azure - Add new parameter ``ha_enable_https`` for HA CVO to enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml
new file mode 100644
index 000000000..e8fb7cdbd
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4164.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - add new parameter ``kms_key_id`` and ``kms_key_arn`` as AWS encryption parameters to support AWS CVO encryption
+ - na_cloudmanager_cvo_azure - add new parameter ``azure_encryption_parameters`` to support AZURE CVO encryption
+ - na_cloudmanager_cvo_gcp - add new parameter ``gcp_encryption_parameters`` to support GCP CVO encryption
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml
new file mode 100644
index 000000000..c6dc8ce07
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4200.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_snapmirror - key error CloudProviderName for ONPREM operation
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml
new file mode 100644
index 000000000..b55a9bc0d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4201.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_azure - Change default value of ``virtual_machine_size`` to Standard_DS3_v2. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml
new file mode 100644
index 000000000..58750c3aa
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4205.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager - Support pd-balanced in ``gcp_volume_type`` for CVO GCP, ``provider_volume_type`` in na_cloudmanager_snapmirror and na_cloudmanager_volume. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml
new file mode 100644
index 000000000..b4f998061
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4223.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Support license_type update
+ - na_cloudmanager_cvo_azure - Support license_type update
+ - na_cloudmanager_cvo_gcp - Support license_type update \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml
new file mode 100644
index 000000000..d986b0991
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4264.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add ontap image upgrade on AWS, AZURE and GCP CVOs if ``upgrade_ontap_version`` is true and ``ontap_version`` is provided with a specific version. ``use_latest_version`` has to be false. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml
new file mode 100644
index 000000000..1ce27541a
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4267.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add CVO modification unit tests \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml
new file mode 100644
index 000000000..d6cbc19e0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4271.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Support instance_type update
+ - na_cloudmanager_cvo_azure - Support instance_type update
+ - na_cloudmanager_cvo_gcp - Support instance_type update \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml
new file mode 100644
index 000000000..33295e409
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4281.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_aws_fsx - Import AWS FSX to CloudManager by adding new parameters ``import_file_system`` and ``file_system_id``. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml
new file mode 100644
index 000000000..22bfaa25a
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4292.yaml
@@ -0,0 +1,8 @@
+minor_changes:
+ - na_cloudmanager_connector_gcp - make the module idempotent for create and delete.
+ - na_cloudmanager_connector_gcp - automatically fetch client_id for delete.
+ - na_cloudmanager_connector_gcp - report client_id if connector already exists.
+ - all modules - better error reporting if refresh_token is not valid.
+
+bugfixes:
+ - na_cloudmanager_connector_gcp - TypeError when using proxy certificates.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml
new file mode 100644
index 000000000..2f0281975
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4298.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_aws - Fix default ami not based on the region in resource file \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml
new file mode 100644
index 000000000..183a0e149
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4303.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Adding new parameter ``capacity_package_name`` for all CVOs creation with capacity based ``license_type`` capacity-paygo or ha-capacity-paygo for HA. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml
new file mode 100644
index 000000000..f06f7e78d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4321.yaml
@@ -0,0 +1,4 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Add unit tests for capacity based license support.
+ - na_cloudmanager_cvo_azure - Add unit tests for capacity based license support.
+ - na_cloudmanager_cvo_gcp - Add unit tests for capacity based license support and delete cvo. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml
new file mode 100644
index 000000000..15f75f223
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4327.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - netapp.py - improve error handling with error content. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml
new file mode 100644
index 000000000..5d9a08a85
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4328.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_snapmirror - working environment get information api not working for onprem is fixed \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml
new file mode 100644
index 000000000..cbc0dcaf0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4358.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - na_cloudmanager_cvo_azure - Add extra tag handling on azure_tag maintenance
+ - na_cloudmanager_cvo_gcp - Add extra label handling for HA and only allow adding new labels on gcp_labels \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml
new file mode 100644
index 000000000..e9c67085d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4386.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix cannot find working environment if ``working_environment_name`` is provided \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml
new file mode 100644
index 000000000..6b4b660a0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4416.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - PR1 - allow usage of Ansible module group defaults - for Ansible 2.12+.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml
new file mode 100644
index 000000000..75058f80c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4458.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_snapmirror - report actual error rather than None with "Error getting destination info".
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml
new file mode 100644
index 000000000..2e37eb799
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4492.yaml
@@ -0,0 +1,5 @@
+minor_changes:
+ - na_cloudmanager_connector_aws - make the module idempotent for create and delete.
+ - na_cloudmanager_connector_aws - automatically fetch client_id and instance_id for delete.
+ - na_cloudmanager_connector_aws - report client_id and instance_id if connector already exists.
+ - na_cloudmanager_info - new subsets - account_info, agents_info, active_agents_info
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml
new file mode 100644
index 000000000..10384a2b2
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4500.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add ``update_svm_password`` for ``svm_password`` update on AWS, AZURE and GCP CVOs. Update ``svm_password`` if ``update_svm_password`` is true. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml
new file mode 100644
index 000000000..adb0ea1cb
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4513.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_cvo_gcp - handle extra two auto-gen GCP labels to prevent update ``gcp_labels`` failure.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml
new file mode 100644
index 000000000..f8bfbeb99
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4516.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_snapmirror - Add FSX to snapmirror. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml
new file mode 100644
index 000000000..51ee7719f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4542.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_volume - Report error if the volume properties cannot be modified. Add support ``tiering_policy`` and ``snapshot_policy_name`` modification.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml
new file mode 100644
index 000000000..c7a1c8a40
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4563.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - CVO working environment clusterProperties is deprecated. Make changes accordingly. Add CVO update status check on ``instance_type`` change.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml
new file mode 100644
index 000000000..4aeae1916
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4567.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Add check when volume is capacity tiered. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml
new file mode 100644
index 000000000..5320ef4cf
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4647.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add the description of client_id based on the cloudmanager UI.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml
new file mode 100644
index 000000000..adb6c3d51
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4703.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Set license_type default value 'capacity-paygo' for single node 'ha-capacity-paygo' for HA and 'capacity_package_name' value 'Essential' \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml
new file mode 100644
index 000000000..2bb42546d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4758.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_gcp - when using the user application default credential authentication by running the command gcloud auth application-default login, ``gcp_service_account_path`` is not needed. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml
new file mode 100644
index 000000000..28e61171d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-4820.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_azure - Fix string formatting error when deleting the connector. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml
new file mode 100644
index 000000000..4543db292
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5002.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_azure - Support user defined ``storage_account`` name. The ``storage_account`` can be created automatically. When ``storage_account`` is not set, the name is constructed by appending 'sa' to the connector ``name``. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml
new file mode 100644
index 000000000..929ad60da
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5151.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_azure - Support full ``subnet_id`` and ``vnet_id``. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml
new file mode 100644
index 000000000..9f9a98f58
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5252.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Support ``writing_speed_state`` modification on AWS, AZURE and GCP CVOs. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml
new file mode 100644
index 000000000..01fb9b920
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5307.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_volume - Support AWS FsxN working environment. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml
new file mode 100644
index 000000000..b7d0e1bc9
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5342.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add ``subnet_path`` option in CVO GCP. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml
new file mode 100644
index 000000000..16ea910ec
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5366.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_cvo_aws - Add new parameter ``cluster_key_pair_name`` to support SSH authentication method key pair. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml
new file mode 100644
index 000000000..3222dc7c5
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5437.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - na_cloudmanager_connector_gcp - Fix default machine_type value on the GCP connector. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml
new file mode 100644
index 000000000..3a9207105
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5452.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add ``svm_name`` option in CVO for AWS, AZURE and GCP creation and update. \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml
new file mode 100644
index 000000000..494e17c3d
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5472.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add ``availability_zone`` option in CVO Azure on the location configuration.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml
new file mode 100644
index 000000000..e1643c975
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5527.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - na_cloudmanager_connector_azure - expose connector managed system identity principal_id to perform role assignment \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml
new file mode 100644
index 000000000..ca5e328eb
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5540.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - new meta/execution-environment.yml is failing ansible-builder sanitize step.
diff --git a/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml
new file mode 100644
index 000000000..287e843b1
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/changelogs/fragments/DEVOPS-5562.yaml
@@ -0,0 +1,3 @@
+minor_changes:
+ - na_cloudmanager_cvo_azure - Add parameter ``availability_zone_node1`` and ``availability_zone_node2`` for CVO Azure HA location
+ - na_cloudmanager_cvo_azure - Add new ``storage_type`` value Premium_ZRS \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/README.md b/ansible_collections/netapp/cloudmanager/execution_environments/README.md
new file mode 100644
index 000000000..fda73b90f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/README.md
@@ -0,0 +1,34 @@
+# How to build an Ansible Execution Environment
+
+## Prerequisites
+This was tested with ansible-builder version 1.1.0.
+
+## Building from Galaxy
+Using the files in the ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy directory as a template:
+- execution-environment.yml describes the build environment.
+- requirements.yml defines the collections to add into your execution environment.
+
+Then build with:
+
+```
+ansible-builder build
+```
+
+For instance, using podman instead of docker, and tagging:
+```
+ansible-builder build --container-runtime=podman --tag myregistry.io/ansible-ee-netapp-cm:21.20.1 -f execution-environment.yml -v 3
+```
+
+In my case, I needed to use sudo.
+
+## Building from GitHub
+Alternatively, the source code can be downloaded from GitHub. This allows you to get code before release (at your own risk) or to use a fork.
+See ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml
+
+## References
+
+https://ansible-builder.readthedocs.io/en/stable/usage/
+
+https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html
+
+
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml
new file mode 100644
index 000000000..466fb8373
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/execution-environment.yml
@@ -0,0 +1,10 @@
+---
+version: 1
+
+# ansible_config: 'ansible.cfg'
+
+# build_arg_defaults:
+# EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest'
+
+dependencies:
+ galaxy: requirements.yml
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml
new file mode 100644
index 000000000..b19e33b49
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_galaxy/requirements.yml
@@ -0,0 +1,13 @@
+---
+collections:
+ # Install collections from Galaxy
+ # - name: ansible.posix
+ # - name: netapp.aws
+ # # name: - netapp.azure
+ - name: netapp.cloudmanager
+ version: 21.20.1
+ # - name: netapp.elementsw
+ # - name: netapp.ontap
+ # version: 21.24.1
+ # - name: netapp.storagegrid
+ # - name: netapp.um_info
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml
new file mode 100644
index 000000000..466fb8373
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/execution-environment.yml
@@ -0,0 +1,10 @@
+---
+version: 1
+
+# ansible_config: 'ansible.cfg'
+
+# build_arg_defaults:
+# EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest'
+
+dependencies:
+ galaxy: requirements.yml
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml
new file mode 100644
index 000000000..efea39c22
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/from_github/requirements.yml
@@ -0,0 +1,18 @@
+---
+collections:
+ # Install collections from Galaxy
+ # - name: ansible.posix
+ # - name: netapp.aws
+ # # name: - netapp.azure
+ # - name: netapp.cloudmanager
+ # version: 21.20.1
+ # - name: netapp.elementsw
+ # - name: netapp.ontap
+ # version: 21.24.1
+ # - name: netapp.storagegrid
+ # - name: netapp.um_info
+
+ # Install a collection from GitHub.
+ - source: https://github.com/ansible-collections/netapp.cloudmanager.git
+ type: git
+ version: test_ee_21_20_0
diff --git a/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt b/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt
new file mode 100644
index 000000000..02dd40520
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/execution_environments/requirements.txt
@@ -0,0 +1 @@
+ansible-builder
diff --git a/ansible_collections/netapp/cloudmanager/kubectl.sha256 b/ansible_collections/netapp/cloudmanager/kubectl.sha256
new file mode 100644
index 000000000..13867098c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/kubectl.sha256
@@ -0,0 +1 @@
+b859766d7b47267af5cc1ee01a2d0c3c137dbfc53cd5be066181beed11ec7d34 \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml b/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml
new file mode 100644
index 000000000..ad211b139
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/meta/execution-environment.yml
@@ -0,0 +1,3 @@
+version: 1
+dependencies:
+ python: requirements.txt
diff --git a/ansible_collections/netapp/cloudmanager/meta/runtime.yml b/ansible_collections/netapp/cloudmanager/meta/runtime.yml
new file mode 100644
index 000000000..df9365301
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/meta/runtime.yml
@@ -0,0 +1,17 @@
+---
+requires_ansible: ">=2.9.10"
+action_groups:
+ netapp_cloudmanager:
+ - na_cloudmanager_aggregate
+ - na_cloudmanager_cifs_server
+ - na_cloudmanager_connector_aws
+ - na_cloudmanager_connector_azure
+ - na_cloudmanager_connector_gcp
+ - na_cloudmanager_cvo_aws
+ - na_cloudmanager_cvo_azure
+ - na_cloudmanager_cvo_gcp
+ - na_cloudmanager_info
+ - na_cloudmanager_nss_account
+ - na_cloudmanager_snapmirror
+ - na_cloudmanager_volume
+ - na_cloudmanager_aws_fsx
diff --git a/ansible_collections/netapp/cloudmanager/plugins/README.md b/ansible_collections/netapp/cloudmanager/plugins/README.md
new file mode 100644
index 000000000..6541cf7cf
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/README.md
@@ -0,0 +1,31 @@
+# Collections Plugins Directory
+
+This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
+is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
+would contain module utils and modules respectively.
+
+Here is an example directory of the majority of plugins currently supported by Ansible:
+
+```
+└── plugins
+ ├── action
+ ├── become
+ ├── cache
+ ├── callback
+ ├── cliconf
+ ├── connection
+ ├── filter
+ ├── httpapi
+ ├── inventory
+ ├── lookup
+ ├── module_utils
+ ├── modules
+ ├── netconf
+ ├── shell
+ ├── strategy
+ ├── terminal
+ ├── test
+ └── vars
+```
+
+A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html). \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py
new file mode 100644
index 000000000..76807bb1c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Documentation fragment for CLOUDMANAGER
+ CLOUDMANAGER = """
+options:
+ refresh_token:
+ type: str
+ description:
+ - The refresh token for NetApp Cloud Manager API operations.
+
+ sa_secret_key:
+ type: str
+ description:
+ - The service account secret key for NetApp Cloud Manager API operations.
+
+ sa_client_id:
+ type: str
+ description:
+ - The service account secret client ID for NetApp Cloud Manager API operations.
+
+ environment:
+ type: str
+ description:
+ - The environment for NetApp Cloud Manager API operations.
+ default: prod
+ choices: ['prod', 'stage']
+ version_added: 21.8.0
+
+ feature_flags:
+ description:
+ - Enable or disable a new feature.
+ - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
+ - Supported keys and values are subject to change without notice. Unknown keys are ignored.
+ type: dict
+ version_added: 21.11.0
+notes:
+ - The modules prefixed with na_cloudmanager are built to manage CloudManager and CVO deployments in AWS/GCP/Azure clouds.
+ - If sa_client_id and sa_secret_key are provided, service account will be used in operations. refresh_token will be ignored.
+"""
diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py
new file mode 100644
index 000000000..eaecc8f00
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py
@@ -0,0 +1,332 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017-2021, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+netapp.py: wrapper around send_requests and other utilities
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import time
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
# Version of this collection, reported to the Cloud Manager service.
COLLECTION_VERSION = "21.22.0"
# Endpoints, OAuth client ids and machine-image identifiers for the
# production Cloud Manager service (environment=prod, the default).
PROD_ENVIRONMENT = {
    'CLOUD_MANAGER_HOST': 'cloudmanager.cloud.netapp.com',
    'AUTH0_DOMAIN': 'netapp-cloud-account.auth0.com',
    'SA_AUTH_HOST': 'cloudmanager.cloud.netapp.com/auth/oauth/token',
    'AUTH0_CLIENT': 'Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC',
    'AMI_FILTER': 'Setup-As-Service-AMI-Prod*',
    'AWS_ACCOUNT': '952013314444',
    'GCP_IMAGE_PROJECT': 'netapp-cloudmanager',
    'GCP_IMAGE_FAMILY': 'cloudmanager',
    'CVS_HOST_NAME': 'https://api.services.cloud.netapp.com'
}
# Same settings for NetApp's staging service (selected with environment=stage).
STAGE_ENVIRONMENT = {
    'CLOUD_MANAGER_HOST': 'staging.cloudmanager.cloud.netapp.com',
    'AUTH0_DOMAIN': 'staging-netapp-cloud-account.auth0.com',
    'SA_AUTH_HOST': 'staging.cloudmanager.cloud.netapp.com/auth/oauth/token',
    'AUTH0_CLIENT': 'O6AHa7kedZfzHaxN80dnrIcuPBGEUvEv',
    'AMI_FILTER': 'Setup-As-Service-AMI-*',
    'AWS_ACCOUNT': '282316784512',
    'GCP_IMAGE_PROJECT': 'tlv-automation',
    'GCP_IMAGE_FAMILY': 'occm-automation',
    'CVS_HOST_NAME': 'https://staging.api.services.cloud.netapp.com'
}

# requests is an optional third-party dependency; its absence is reported
# per-module via CloudManagerRestAPI.check_required_library().
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False


# Multipliers converting size-unit suffixes to bytes (powers of 1024).
POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    kb=1024,
    mb=1024 ** 2,
    gb=1024 ** 3,
    tb=1024 ** 4,
    pb=1024 ** 5,
    eb=1024 ** 6,
    zb=1024 ** 7,
    yb=1024 ** 8
)


LOG = logging.getLogger(__name__)
# REST traces are appended here when the trace_apis feature flag is enabled.
LOG_FILE = '/tmp/cloudmanager_apis.log'
+
+
def cloudmanager_host_argument_spec():
    """Return the authentication/environment options common to all cloudmanager modules."""
    # the three credential options share the same spec; copy it per key
    secret_option = dict(required=False, type='str', no_log=True)
    return {
        'refresh_token': dict(secret_option),
        'sa_client_id': dict(secret_option),
        'sa_secret_key': dict(secret_option),
        'environment': dict(required=False, type='str', choices=['prod', 'stage'], default='prod'),
        'feature_flags': dict(required=False, type='dict'),
    }
+
+
def has_feature(module, feature_name):
    """Return a boolean feature flag, failing the module when the flag is not boolean."""
    flag = get_feature(module, feature_name)
    if isinstance(flag, bool):
        return flag
    module.fail_json(msg="Error: expected bool type for feature flag: %s, found %s" % (feature_name, type(flag)))
+
+
def get_feature(module, feature_name):
    """Return a feature flag's value, preferring the user's feature_flags option over built-in defaults."""
    defaults = dict(
        trace_apis=False,       # if True, append REST requests/responses to /tmp/cloudmanager_apis.log
        trace_headers=False,    # if True, and if trace_apis is True, include <large> headers in trace
        show_modified=True,
        simulator=False,        # if True, it is running on simulator
    )

    user_flags = module.params['feature_flags']
    if user_flags is not None and feature_name in user_flags:
        return user_flags[feature_name]
    if feature_name in defaults:
        return defaults[feature_name]
    module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name)
+
+
class CloudManagerRestAPI(object):
    """Wrapper around send_request for Cloud Manager REST calls.

    Handles authentication (service account or refresh token), prod/stage
    environment selection, optional API tracing, retry on connection-pool
    saturation, and polling of long-running tasks.
    """
    def __init__(self, module, timeout=60):
        self.module = module
        self.timeout = timeout  # per-request timeout, in seconds
        self.refresh_token = self.module.params['refresh_token']
        self.sa_client_id = self.module.params['sa_client_id']
        self.sa_secret_key = self.module.params['sa_secret_key']
        self.environment = self.module.params['environment']
        # pick hosts/auth endpoints for the selected environment ('prod' or 'stage',
        # enforced by the argument spec choices)
        if self.environment == 'prod':
            self.environment_data = PROD_ENVIRONMENT
        elif self.environment == 'stage':
            self.environment_data = STAGE_ENVIRONMENT
        self.url = 'https://'
        self.api_root_path = None
        self.check_required_library()
        if has_feature(module, 'trace_apis'):
            logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
        self.log_headers = has_feature(module, 'trace_headers')  # requires trace_apis to do anything
        self.simulator = has_feature(module, 'simulator')
        # acquire an OAuth token up front; fails the module when no credentials are set
        self.token_type, self.token = self.get_token()

    def check_required_library(self):
        """Fail the module when the requests package is not installed."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def format_client_id(self, client_id):
        """Return the connector client id with the 'clients' suffix some APIs require."""
        return client_id if client_id.endswith('clients') else client_id + 'clients'

    def build_url(self, api):
        """Return the full request URL for an API path."""
        # most requests are sent to Cloud Manager, but for connectors we need to manage VM instances using AWS, Azure, or GCP APIs
        if api.startswith('http'):
            return api
        # add host if API starts with / and host is not already included in self.url
        prefix = self.environment_data['CLOUD_MANAGER_HOST'] if self.environment_data['CLOUD_MANAGER_HOST'] not in self.url and api.startswith('/') else ''
        return self.url + prefix + api

    def send_request(self, method, api, params, json=None, data=None, header=None, authorized=True):
        ''' send http request and process response, including error conditions

        Returns (json_dict, error_details, on_cloud_request_id).
        '''
        url = self.build_url(api)
        headers = {
            'Content-type': "application/json",
            'Referer': "Ansible_NetApp",
        }
        if authorized:
            headers['Authorization'] = self.token_type + " " + self.token
        if header is not None:
            headers.update(header)
        # retry up to 3 times on connection-pool saturation
        for __ in range(3):
            json_dict, error_details, on_cloud_request_id = self._send_request(method, url, params, json, data, headers)
            # we observe this error with DELETE on agents-mgmt/agent (and sometimes on GET)
            if error_details is not None and 'Max retries exceeded with url:' in error_details:
                time.sleep(5)
            else:
                break
        return json_dict, error_details, on_cloud_request_id

    def _send_request(self, method, url, params, json, data, headers):
        """Issue a single HTTP request; returns (json_dict, error_details, on_cloud_request_id)."""
        json_dict = None
        json_error = None
        error_details = None
        on_cloud_request_id = None
        response = None
        status_code = None

        def get_json(response):
            ''' extract json, and error message if present '''
            # NOTE: the local 'json' deliberately shadows the outer request payload
            error = None
            try:
                json = response.json()
            except ValueError:
                return None, None
            success_code = [200, 201, 202]
            if response.status_code not in success_code:
                error = json.get('message')
                self.log_error(response.status_code, 'HTTP error: %s' % error)
            return json, error

        self.log_request(method=method, url=url, params=params, json=json, data=data, headers=headers)
        try:
            response = requests.request(method, url, headers=headers, timeout=self.timeout, params=params, json=json, data=data)
            status_code = response.status_code
            # non-2xx: return the raw body and the status code as the error string
            if status_code >= 300 or status_code < 200:
                self.log_error(status_code, 'HTTP status code error: %s' % response.content)
                return response.content, str(status_code), on_cloud_request_id
            # If the response was successful, no Exception will be raised
            json_dict, json_error = get_json(response)
            if response.headers.get('OnCloud-Request-Id', '') != '':
                on_cloud_request_id = response.headers.get('OnCloud-Request-Id')
        except requests.exceptions.HTTPError as err:
            self.log_error(status_code, 'HTTP error: %s' % err)
            error_details = str(err)
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        # json_error is only set when a response was received, so status_code is set here
        if json_error is not None:
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        if response:
            self.log_debug(status_code, response.content)
        return json_dict, error_details, on_cloud_request_id

    # If an error was reported in the json payload, it is handled below
    def get(self, api, params=None, header=None):
        """HTTP GET."""
        method = 'GET'
        return self.send_request(method=method, api=api, params=params, json=None, header=header)

    def post(self, api, data, params=None, header=None, gcp_type=False, authorized=True):
        """HTTP POST; gcp_type sends the payload as form data instead of JSON."""
        method = 'POST'
        if gcp_type:
            return self.send_request(method=method, api=api, params=params, data=data, header=header)
        else:
            return self.send_request(method=method, api=api, params=params, json=data, header=header, authorized=authorized)

    def patch(self, api, data, params=None, header=None):
        """HTTP PATCH."""
        method = 'PATCH'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def put(self, api, data, params=None, header=None):
        """HTTP PUT."""
        method = 'PUT'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def delete(self, api, data, params=None, header=None):
        """HTTP DELETE (with a JSON body)."""
        method = 'DELETE'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def get_token(self):
        """Acquire an OAuth token, preferring service account credentials over refresh_token.

        Returns (token_type, token); fails the module when neither credential is set
        or the auth endpoint returns an error.
        """
        if self.sa_client_id is not None and self.sa_client_id != "" and self.sa_secret_key is not None and self.sa_secret_key != "":
            response, error, ocr_id = self.post(self.environment_data['SA_AUTH_HOST'],
                                                data={"grant_type": "client_credentials", "client_secret": self.sa_secret_key,
                                                      "client_id": self.sa_client_id, "audience": "https://api.cloud.netapp.com"},
                                                authorized=False)
        elif self.refresh_token is not None and self.refresh_token != "":
            response, error, ocr_id = self.post(self.environment_data['AUTH0_DOMAIN'] + '/oauth/token',
                                                data={"grant_type": "refresh_token", "refresh_token": self.refresh_token,
                                                      "client_id": self.environment_data['AUTH0_CLIENT'],
                                                      "audience": "https://api.cloud.netapp.com"},
                                                authorized=False)
        else:
            self.module.fail_json(msg='Missing refresh_token or sa_client_id and sa_secret_key')

        if error:
            self.module.fail_json(msg='Error acquiring token: %s, %s' % (str(error), str(response)))
        token = response['access_token']
        token_type = response['token_type']

        return token_type, token

    def wait_on_completion(self, api_url, action_name, task, retries, wait_interval):
        """Poll api_url until the task completes; return None on success or an error string."""
        while True:
            cvo_status, failure_error_message, error = self.check_task_status(api_url)
            if error is not None:
                return error
            if cvo_status == -1:
                return 'Failed to %s %s, error: %s' % (task, action_name, failure_error_message)
            elif cvo_status == 1:
                return None  # success
            # status value 0 means pending
            if retries == 0:
                return 'Taking too long for %s to %s or not properly setup' % (action_name, task)
            time.sleep(wait_interval)
            retries = retries - 1

    def check_task_status(self, api_url):
        """Fetch a task's status once, retrying transient network errors up to 3 times.

        Returns (status, error_message, transport_error); status 1 means success,
        0 pending, -1 failed (per wait_on_completion's interpretation).
        """
        headers = {
            'X-Agent-Id': self.format_client_id(self.module.params['client_id'])
        }

        network_retries = 3
        while True:
            result, error, dummy = self.get(api_url, None, header=headers)
            if error is not None:
                if network_retries <= 0:
                    # give up: report pending (0) together with the transport error
                    return 0, '', error
                time.sleep(1)
                network_retries -= 1
            else:
                response = result
                break
        return response['status'], response['error'], None

    def log_error(self, status_code, message):
        """Record an error line in the API trace log."""
        LOG.error("%s: %s", status_code, message)

    def log_debug(self, status_code, content):
        """Record a debug line in the API trace log."""
        LOG.debug("%s: %s", status_code, content)

    def log_request(self, method, params, url, json, data, headers):
        """Trace an outgoing request; headers are included only when trace_headers is on."""
        contents = {
            'method': method,
            'url': url,
            'json': json,
            'data': data
        }
        if params:
            contents['params'] = params
        if self.log_headers:
            contents['headers'] = headers
        self.log_debug('sending', repr(contents))
diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py
new file mode 100644
index 000000000..aa73f205a
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py
@@ -0,0 +1,1381 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2022, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+import json
+import re
+import base64
+import time
+
+
def cmp(a, b):
    '''
    Python 3 does not have a cmp function, this will do the cmp.

    Strings are compared case-insensitively.  Lists are compared as sorted,
    case-insensitive sequences.  None always compares as smaller.

    :param a: first object to check
    :param b: second object to check
    :return: -1, 0 or 1
    '''
    # convert to lower case for string comparison.
    if a is None:
        return -1
    if isinstance(a, str) and isinstance(b, str):
        a = a.lower()
        b = b.lower()
    # if list has string element, convert string to lower case.
    if isinstance(a, list) and isinstance(b, list):
        # bug fix: the previous implementation called a.sort()/b.sort() on the
        # inputs, mutating the caller's lists as a side effect; sort copies instead.
        a = sorted(x.lower() if isinstance(x, str) else x for x in a)
        b = sorted(x.lower() if isinstance(x, str) else x for x in b)
    return (a > b) - (a < b)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = []
+ self.changed = False
+ self.parameters = {'name': 'not intialized'}
+
+ def set_parameters(self, ansible_params):
+ self.parameters = {}
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def get_cd_action(self, current, desired):
+ ''' takes a desired state and a current state, and return an action:
+ create, delete, None
+ eg:
+ is_present = 'absent'
+ some_object = self.get_object(source)
+ if some_object is not None:
+ is_present = 'present'
+ action = cd_action(current=is_present, desired = self.desired.state())
+ '''
+ desired_state = desired['state'] if 'state' in desired else 'present'
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
+ updated_values = {}
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ def get_working_environments_info(self, rest_api, headers):
+ '''
+ Get all working environments info
+ '''
+ api = "/occm/api/working-environments"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return response, error
+ else:
+ return response, None
+
+ def look_up_working_environment_by_name_in_list(self, we_list, name):
+ '''
+ Look up working environment by the name in working environment list
+ '''
+ for we in we_list:
+ if we['name'] == name:
+ return we, None
+ return None, "look_up_working_environment_by_name_in_list: Working environment not found"
+
+ def get_working_environment_details_by_name(self, rest_api, headers, name, provider=None):
+ '''
+ Use working environment name to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ cloudProviderName,
+ isHA,
+ svmName
+ '''
+ # check the working environment exist or not
+ api = "/occm/api/working-environments/exists/" + name
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+
+ # get working environment lists
+ api = "/occm/api/working-environments"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+ # look up the working environment in the working environment lists
+ if provider is None or provider == 'onPrem':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['onPremWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'gcp':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['gcpVsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'azure':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['azureVsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'aws':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['vsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ return None, "get_working_environment_details_by_name: Working environment not found"
+
+ def get_working_environment_details(self, rest_api, headers):
+ '''
+ Use working environment id to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ cloudProviderName,
+ ontapClusterProperties,
+ isHA,
+ status,
+ userTags,
+ workingEnvironmentType,
+ '''
+ api = "/occm/api/working-environments/"
+ api += self.parameters['working_environment_id']
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error:
+ return None, "Error: get_working_environment_details %s" % error
+ return response, None
+
+ def get_aws_fsx_details(self, rest_api, header=None, name=None):
+ '''
+ Use working environment id and tenantID to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ '''
+ api = "/fsx-ontap/working-environments/"
+ api += self.parameters['tenant_id']
+ count = 0
+ fsx_details = None
+ if name is None:
+ name = self.parameters['name']
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details %s" % error
+ for each in response:
+ if each['name'] == name:
+ count += 1
+ fsx_details = each
+ if self.parameters.get('working_environment_id'):
+ if each['id'] == self.parameters['working_environment_id']:
+ return each, None
+ if count == 1:
+ return fsx_details, None
+ elif count > 1:
+ return response, "More than one AWS FSx found for %s, use working_environment_id for delete" \
+ "or use different name for create" % name
+ return None, None
+
+ def get_aws_fsx_details_by_id(self, rest_api, header=None):
+ '''
+ Use working environment id and tenantID to get working environment details including:
+ publicID: working environment ID
+ '''
+ api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id']
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details %s" % error
+ for each in response:
+ if self.parameters.get('destination_working_environment_id') and each['id'] == self.parameters['destination_working_environment_id']:
+ return each, None
+ return None, None
+
+ def get_aws_fsx_details_by_name(self, rest_api, header=None):
+ '''
+ Use working environment name and tenantID to get working environment details including:
+ name: working environment name,
+ '''
+ api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id']
+ count = 0
+ fsx_details = None
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details_by_name %s" % error
+ for each in response:
+ if each['name'] == self.parameters['destination_working_environment_name']:
+ count += 1
+ fsx_details = each
+ if count == 1:
+ return fsx_details['id'], None
+ if count > 1:
+ return response, "More than one AWS FSx found for %s" % self.parameters['name']
+ return None, None
+
+ def get_aws_fsx_svm(self, rest_api, id, header=None):
+ '''
+ Use working environment id and tenantID to get FSx svm details including:
+ publicID: working environment ID
+ '''
+ api = "/occm/api/fsx/working-environments/%s/svms" % id
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_svm %s" % error
+ if len(response) == 0:
+ return None, "Error: no SVM found for %s" % id
+ return response[0]['name'], None
+
    def get_working_environment_detail_for_snapmirror(self, rest_api, headers):
        """Resolve source and destination working environments for a snapmirror relationship.

        The source is located by id or name.  The destination may also be an AWS FSx
        environment (id starting with 'fs-', or name lookup when tenant_id is set);
        for FSx only publicId and svmName are filled in.
        :return: (source_detail, destination_detail, error) -- error is None on success
        """
        source_working_env_detail, dest_working_env_detail = {}, {}
        if self.parameters.get('source_working_environment_id'):
            api = '/occm/api/working-environments'
            working_env_details, error, dummy = rest_api.get(api, None, header=headers)
            if error:
                return None, None, "Error getting WE info: %s: %s" % (error, working_env_details)
            # the response maps provider buckets to lists; scan all of them for the id
            for dummy, values in working_env_details.items():
                for each in values:
                    if each['publicId'] == self.parameters['source_working_environment_id']:
                        source_working_env_detail = each
                        break
        elif self.parameters.get('source_working_environment_name'):
            source_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers,
                                                                                            self.parameters['source_working_environment_name'])
            if error:
                return None, None, error
        else:
            return None, None, "Cannot find working environment by source_working_environment_id or source_working_environment_name"

        if self.parameters.get('destination_working_environment_id'):
            # FSx ids start with 'fs-'; looking them up requires tenant_id
            if self.parameters['destination_working_environment_id'].startswith('fs-'):
                if self.parameters.get('tenant_id'):
                    working_env_details, error = self.get_aws_fsx_details_by_id(rest_api, header=headers)
                    if error:
                        return None, None, "Error getting WE info for FSx: %s: %s" % (error, working_env_details)
                    dest_working_env_detail['publicId'] = self.parameters['destination_working_environment_id']
                    svm_name, error = self.get_aws_fsx_svm(rest_api, self.parameters['destination_working_environment_id'], header=headers)
                    if error:
                        return None, None, "Error getting svm name for FSx: %s" % error
                    dest_working_env_detail['svmName'] = svm_name
                else:
                    return None, None, "Cannot find FSx WE by destination WE %s, missing tenant_id" % self.parameters['destination_working_environment_id']
            else:
                api = '/occm/api/working-environments'
                working_env_details, error, dummy = rest_api.get(api, None, header=headers)
                if error:
                    return None, None, "Error getting WE info: %s: %s" % (error, working_env_details)
                for dummy, values in working_env_details.items():
                    for each in values:
                        if each['publicId'] == self.parameters['destination_working_environment_id']:
                            dest_working_env_detail = each
                            break
        elif self.parameters.get('destination_working_environment_name'):
            # a tenant_id together with a destination name means an FSx destination
            if self.parameters.get('tenant_id'):
                fsx_id, error = self.get_aws_fsx_details_by_name(rest_api, header=headers)
                if error:
                    return None, None, "Error getting WE info for FSx: %s" % error
                dest_working_env_detail['publicId'] = fsx_id
                svm_name, error = self.get_aws_fsx_svm(rest_api, fsx_id, header=headers)
                if error:
                    return None, None, "Error getting svm name for FSx: %s" % error
                dest_working_env_detail['svmName'] = svm_name
            else:
                dest_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers,
                                                                                              self.parameters['destination_working_environment_name'])
                if error:
                    return None, None, error
        else:
            return None, None, "Cannot find working environment by destination_working_environment_id or destination_working_environment_name"

        return source_working_env_detail, dest_working_env_detail, None
+
+ def create_account(self, rest_api):
+ """
+ Create Account
+ :return: Account ID
+ """
+ # TODO? do we need to create an account? And the code below is broken
+ return None, 'Error: creating an account is not supported.'
+ # headers = {
+ # "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ # }
+
+ # api = '/tenancy/account/MyAccount'
+ # account_res, error, dummy = rest_api.post(api, header=headers)
+ # account_id = None if error is not None else account_res['accountPublicId']
+ # return account_id, error
+
+ def get_or_create_account(self, rest_api):
+ """
+ Get Account
+ :return: Account ID
+ """
+ accounts, error = self.get_account_info(rest_api)
+ if error is not None:
+ return None, error
+ if len(accounts) == 0:
+ return None, 'Error: account cannot be located - check credentials or provide account_id.'
+ # TODO? creating an account is not supported
+ # return self.create_account(rest_api)
+
+ return accounts[0]['accountPublicId'], None
+
+ def get_account_info(self, rest_api, headers=None):
+ """
+ Get Account
+ :return: Account ID
+ """
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+
+ api = '/tenancy/account'
+ account_res, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, error
+ return account_res, None
+
+ def get_account_id(self, rest_api):
+ accounts, error = self.get_account_info(rest_api)
+ if error:
+ return None, error
+ if not accounts:
+ return None, 'Error: no account found - check credentials or provide account_id.'
+ return accounts[0]['accountPublicId'], None
+
+ def get_accounts_info(self, rest_api, headers):
+ '''
+ Get all accounts info
+ '''
+ api = "/occm/api/accounts"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+ else:
+ return response, None
+
+ def set_api_root_path(self, working_environment_details, rest_api):
+ '''
+ set API url root path based on the working environment provider
+ '''
+ provider = working_environment_details['cloudProviderName'] if working_environment_details.get('cloudProviderName') else None
+ api_root_path = None
+ if self.parameters['working_environment_id'].startswith('fs-'):
+ api_root_path = "/occm/api/fsx"
+ elif provider == "Amazon":
+ api_root_path = "/occm/api/aws/ha" if working_environment_details['isHA'] else "/occm/api/vsa"
+ elif working_environment_details['isHA']:
+ api_root_path = "/occm/api/" + provider.lower() + "/ha"
+ else:
+ api_root_path = "/occm/api/" + provider.lower() + "/vsa"
+ rest_api.api_root_path = api_root_path
+
+ def have_required_parameters(self, action):
+ '''
+ Check if all the required parameters in self.params are available or not besides the mandatory parameters
+ '''
+ actions = {'create_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'],
+ 'update_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'],
+ 'delete_aggregate': ['working_environment_id'],
+ }
+ missed_params = [
+ parameter
+ for parameter in actions[action]
+ if parameter not in self.parameters
+ ]
+
+ if not missed_params:
+ return True, None
+ else:
+ return False, missed_params
+
    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
            not in the current state
            It is expected that all attributes of interest are listed in current and
            desired.
            :param: current: current attributes in ONTAP
            :param: desired: attributes from playbook
            :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
            :return: dict of attributes to be modified
            :rtype: dict

            NOTE: depending on the attribute, the caller may need to do a modify or a
            different operation (eg move volume if the modified attribute is an
            aggregate name)
        '''
        # if the object does not exist, we can't modify it
        modified = {}
        if current is None:
            return modified

        # error out if keys do not match
        # self.check_keys(current, desired)

        # collect changed attributes
        for key, value in current.items():
            # only keys present in both dicts (with a non-None desired value) are compared
            if key in desired and desired[key] is not None:
                if isinstance(value, list):
                    modified_list = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                    if modified_list is not None:
                        modified[key] = modified_list
                elif isinstance(value, dict):
                    # recurse into nested dicts; empty result means no change
                    modified_dict = self.get_modified_attributes(value, desired[key])
                    if modified_dict:
                        modified[key] = modified_dict
                else:
                    try:
                        # cmp is the module-level helper: case-insensitive for strings
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    else:
                        if result != 0:
                            modified[key] = desired[key]
        if modified:
            self.changed = True
        return modified
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+ ''' compares two lists and return a list of elements that are either the desired elements or elements that are
+ modified from the current state depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+ current_copy = deepcopy(current)
+ desired_copy = deepcopy(desired)
+
+ # get what in desired and not in current
+ desired_diff_list = list()
+ for item in desired:
+ if item in current_copy:
+ current_copy.remove(item)
+ else:
+ desired_diff_list.append(item)
+
+ # get what in current but not in desired
+ current_diff_list = []
+ for item in current:
+ if item in desired_copy:
+ desired_copy.remove(item)
+ else:
+ current_diff_list.append(item)
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ if get_list_diff:
+ return desired_diff_list
+ else:
+ return desired
+ else:
+ return None
+
+ @staticmethod
+ def convert_module_args_to_api(parameters, exclusion=None):
+ '''
+ Convert a list of string module args to API option format.
+ For example, convert test_option to testOption.
+ :param parameters: dict of parameters to be converted.
+ :param exclusion: list of parameters to be ignored.
+ :return: dict of key value pairs.
+ '''
+ exclude_list = ['api_url', 'token_type', 'refresh_token', 'sa_secret_key', 'sa_client_id']
+ if exclusion is not None:
+ exclude_list += exclusion
+ api_keys = {}
+ for k, v in parameters.items():
+ if k not in exclude_list:
+ words = k.split("_")
+ api_key = ""
+ for word in words:
+ if len(api_key) > 0:
+ word = word.title()
+ api_key += word
+ api_keys[api_key] = v
+ return api_keys
+
+ @staticmethod
+ def convert_data_to_tabbed_jsonstring(data):
+ '''
+ Convert a dictionary data to json format string
+ '''
+ dump = json.dumps(data, indent=2, separators=(',', ': '))
+ return re.sub(
+ '\n +',
+ lambda match: '\n' + '\t' * int(len(match.group().strip('\n')) / 2),
+ dump,
+ )
+
+ @staticmethod
+ def encode_certificates(certificate_file):
+ '''
+ Read certificate file and encode it
+ '''
+ try:
+ with open(certificate_file, mode='rb') as fh:
+ cert = fh.read()
+ except (OSError, IOError) as exc:
+ return None, str(exc)
+ if not cert:
+ return None, "Error: file is empty"
+ return base64.b64encode(cert).decode('utf-8'), None
+
+ @staticmethod
+ def get_occm_agents_by_account(rest_api, account_id):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ params = {'account_id': account_id}
+ api = "/agents-mgmt/agent"
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ agents, error, dummy = rest_api.get(api, header=headers, params=params)
+ return agents, error
+
+ def get_occm_agents_by_name(self, rest_api, account_id, name, provider):
+ """
+ Collect a list of agents matching account_id, name, and provider.
+ :return: list of agents, error
+ """
+ # I tried to query by name and provider in addition to account_id, but it returned everything
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ if isinstance(agents, dict) and 'agents' in agents:
+ agents = [agent for agent in agents['agents'] if agent['name'] == name and agent['provider'] == provider]
+ return agents, error
+
+ def get_agents_info(self, rest_api, headers):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ account_id, error = self.get_account_id(rest_api)
+ if error:
+ return None, error
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ return agents, error
+
+ def get_active_agents_info(self, rest_api, headers):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ clients = []
+ account_id, error = self.get_account_id(rest_api)
+ if error:
+ return None, error
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ if isinstance(agents, dict) and 'agents' in agents:
+ agents = [agent for agent in agents['agents'] if agent['status'] == 'active']
+ clients = [{'name': agent['name'], 'client_id': agent['agentId'], 'provider': agent['provider']} for agent in agents]
+ return clients, error
+
+ @staticmethod
+ def get_occm_agent_by_id(rest_api, client_id):
+ """
+ Fetch OCCM agent given its client id
+ :return: agent details, error
+ """
+ api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id)
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ response, error, dummy = rest_api.get(api, header=headers)
+ if isinstance(response, dict) and 'agent' in response:
+ agent = response['agent']
+ return agent, error
+ return response, error
+
+ @staticmethod
+ def check_occm_status(rest_api, client_id):
+ """
+ Check OCCM status
+ :return: status
+ DEPRECATED - use get_occm_agent_by_id but the retrun value format is different!
+ """
+
+ api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id)
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ occm_status, error, dummy = rest_api.get(api, header=headers)
+ return occm_status, error
+
+ def register_agent_to_service(self, rest_api, provider, vpc):
+ '''
+ register agent to service
+ '''
+ api = '/agents-mgmt/connector-setup'
+
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ body = {
+ "accountId": self.parameters['account_id'],
+ "name": self.parameters['name'],
+ "company": self.parameters['company'],
+ "placement": {
+ "provider": provider,
+ "region": self.parameters['region'],
+ "network": vpc,
+ "subnet": self.parameters['subnet_id'],
+ },
+ "extra": {
+ "proxy": {
+ "proxyUrl": self.parameters.get('proxy_url'),
+ "proxyUserName": self.parameters.get('proxy_user_name'),
+ "proxyPassword": self.parameters.get('proxy_password'),
+ }
+ }
+ }
+
+ if provider == "AWS":
+ body['placement']['network'] = vpc
+
+ response, error, dummy = rest_api.post(api, body, header=headers)
+ return response, error
+
+ def delete_occm(self, rest_api, client_id):
+ '''
+ delete occm
+ '''
+ api = '/agents-mgmt/agent/' + rest_api.format_client_id(client_id)
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ "X-Tenancy-Account-Id": self.parameters['account_id'],
+ }
+
+ occm_status, error, dummy = rest_api.delete(api, None, header=headers)
+ return occm_status, error
+
+ def delete_occm_agents(self, rest_api, agents):
+ '''
+ delete a list of occm
+ '''
+ results = []
+ for agent in agents:
+ if 'agentId' in agent:
+ occm_status, error = self.delete_occm(rest_api, agent['agentId'])
+ else:
+ occm_status, error = None, 'unexpected agent contents: %s' % repr(agent)
+ if error:
+ results.append((occm_status, error))
+ return results
+
    @staticmethod
    def call_parameters():
        # Parameter skeleton (JSON text) for the Azure ARM deployment template
        # returned by call_template(); the caller parses this and fills in each
        # "value" before submitting the deployment.
        return """
{
"location": {
"value": "string"
},
"virtualMachineName": {
"value": "string"
},
"virtualMachineSize": {
"value": "string"
},
"networkSecurityGroupName": {
"value": "string"
},
"adminUsername": {
"value": "string"
},
"virtualNetworkId": {
"value": "string"
},
"adminPassword": {
"value": "string"
},
"subnetId": {
"value": "string"
},
"customData": {
"value": "string"
},
"environment": {
"value": "prod"
},
"storageAccount": {
"value": "string"
}
}
"""
+
    @staticmethod
    def call_template():
        # Azure ARM deployment template (JSON text) used to create the Cloud
        # Manager connector VM: the VM itself (netapp occm-byol image), a
        # managed-identity extension, a diagnostics storage account, a NIC and
        # a public IP. Parameters are supplied via call_parameters().
        return """
{
"$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"location": {
"type": "string",
"defaultValue": "eastus"
},
"virtualMachineName": {
"type": "string"
},
"virtualMachineSize":{
"type": "string"
},
"adminUsername": {
"type": "string"
},
"virtualNetworkId": {
"type": "string"
},
"networkSecurityGroupName": {
"type": "string"
},
"adminPassword": {
"type": "securestring"
},
"subnetId": {
"type": "string"
},
"customData": {
"type": "string"
},
"environment": {
"type": "string",
"defaultValue": "prod"
},
"storageAccount": {
"type": "string"
}
},
"variables": {
"vnetId": "[parameters('virtualNetworkId')]",
"subnetRef": "[parameters('subnetId')]",
"networkInterfaceName": "[concat(parameters('virtualMachineName'),'-nic')]",
"diagnosticsStorageAccountName": "[parameters('storageAccount')]",
"diagnosticsStorageAccountId": "[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]",
"diagnosticsStorageAccountType": "Standard_LRS",
"publicIpAddressName": "[concat(parameters('virtualMachineName'),'-ip')]",
"publicIpAddressType": "Dynamic",
"publicIpAddressSku": "Basic",
"msiExtensionName": "ManagedIdentityExtensionForLinux",
"occmOffer": "[if(equals(parameters('environment'), 'stage'), 'netapp-oncommand-cloud-manager-staging-preview', 'netapp-oncommand-cloud-manager')]"
},
"resources": [
{
"name": "[parameters('virtualMachineName')]",
"type": "Microsoft.Compute/virtualMachines",
"apiVersion": "2018-04-01",
"location": "[parameters('location')]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', variables('networkInterfaceName'))]",
"[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]"
],
"properties": {
"osProfile": {
"computerName": "[parameters('virtualMachineName')]",
"adminUsername": "[parameters('adminUsername')]",
"adminPassword": "[parameters('adminPassword')]",
"customData": "[base64(parameters('customData'))]"
},
"hardwareProfile": {
"vmSize": "[parameters('virtualMachineSize')]"
},
"storageProfile": {
"imageReference": {
"publisher": "netapp",
"offer": "[variables('occmOffer')]",
"sku": "occm-byol",
"version": "latest"
},
"osDisk": {
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "Premium_LRS"
}
},
"dataDisks": []
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', variables('networkInterfaceName'))]"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": true,
"storageUri":
"[concat('https://', variables('diagnosticsStorageAccountName'), '.blob.core.windows.net/')]"
}
}
},
"plan": {
"name": "occm-byol",
"publisher": "netapp",
"product": "[variables('occmOffer')]"
},
"identity": {
"type": "systemAssigned"
}
},
{
"apiVersion": "2017-12-01",
"type": "Microsoft.Compute/virtualMachines/extensions",
"name": "[concat(parameters('virtualMachineName'),'/', variables('msiExtensionName'))]",
"location": "[parameters('location')]",
"dependsOn": [
"[concat('Microsoft.Compute/virtualMachines/', parameters('virtualMachineName'))]"
],
"properties": {
"publisher": "Microsoft.ManagedIdentity",
"type": "[variables('msiExtensionName')]",
"typeHandlerVersion": "1.0",
"autoUpgradeMinorVersion": true,
"settings": {
"port": 50342
}
}
},
{
"name": "[variables('diagnosticsStorageAccountName')]",
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "2015-06-15",
"location": "[parameters('location')]",
"properties": {
"accountType": "[variables('diagnosticsStorageAccountType')]"
}
},
{
"name": "[variables('networkInterfaceName')]",
"type": "Microsoft.Network/networkInterfaces",
"apiVersion": "2018-04-01",
"location": "[parameters('location')]",
"dependsOn": [
"[concat('Microsoft.Network/publicIpAddresses/', variables('publicIpAddressName'))]"
],
"properties": {
"ipConfigurations": [
{
"name": "ipconfig1",
"properties": {
"subnet": {
"id": "[variables('subnetRef')]"
},
"privateIPAllocationMethod": "Dynamic",
"publicIpAddress": {
"id": "[resourceId(resourceGroup().name,'Microsoft.Network/publicIpAddresses', variables('publicIpAddressName'))]"
}
}
}
],
"networkSecurityGroup": {
"id": "[parameters('networkSecurityGroupName')]"
}
}
},
{
"name": "[variables('publicIpAddressName')]",
"type": "Microsoft.Network/publicIpAddresses",
"apiVersion": "2017-08-01",
"location": "[parameters('location')]",
"properties": {
"publicIpAllocationMethod": "[variables('publicIpAddressType')]"
},
"sku": {
"name": "[variables('publicIpAddressSku')]"
}
}
],
"outputs": {
"publicIpAddressName": {
"type": "string",
"value": "[variables('publicIpAddressName')]"
}
}
}
"""
+
+ def get_tenant(self, rest_api, headers):
+ """
+ Get workspace ID (tenant)
+ """
+ api = '/occm/api/tenants'
+ response, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, 'Error: unexpected response on getting tenant for cvo: %s, %s' % (str(error), str(response))
+
+ return response[0]['publicId'], None
+
+ def get_nss(self, rest_api, headers):
+ """
+ Get nss account
+ """
+ api = '/occm/api/accounts'
+ response, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, 'Error: unexpected response on getting nss for cvo: %s, %s' % (str(error), str(response))
+
+ if len(response['nssAccounts']) == 0:
+ return None, "Error: could not find any NSS account"
+
+ return response['nssAccounts'][0]['publicId'], None
+
+ def get_working_environment_property(self, rest_api, headers, fields):
+ # GET /vsa/working-environments/{workingEnvironmentId}?fields=status,awsProperties,ontapClusterProperties
+ api = '%s/working-environments/%s' % (rest_api.api_root_path, self.parameters['working_environment_id'])
+ params = {'fields': ','.join(fields)}
+ response, error, dummy = rest_api.get(api, params=params, header=headers)
+ if error:
+ return None, "Error: get_working_environment_property %s" % error
+ return response, None
+
+ def user_tag_key_unique(self, tag_list, key_name):
+ checked_keys = []
+ for t in tag_list:
+ if t[key_name] in checked_keys:
+ return False, 'Error: %s %s must be unique' % (key_name, t[key_name])
+ else:
+ checked_keys.append(t[key_name])
+ return True, None
+
+ def current_label_exist(self, current, desired, is_ha=False):
+ current_key_set = set(current.keys())
+ # Ignore auto generated gcp label in CVO GCP HA
+ current_key_set.discard('gcp_resource_id')
+ current_key_set.discard('count-down')
+ if is_ha:
+ current_key_set.discard('partner-platform-serial-number')
+ # python 2.6 doe snot support set comprehension
+ desired_keys = set([a_dict['label_key'] for a_dict in desired])
+ if current_key_set.issubset(desired_keys):
+ return True, None
+ else:
+ return False, 'Error: label_key %s in gcp_label cannot be removed' % str(current_key_set)
+
+ def is_label_value_changed(self, current_tags, desired_tags):
+ tag_keys = list(current_tags.keys())
+ user_tag_keys = [key for key in tag_keys if
+ key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')]
+ desired_keys = [a_dict['label_key'] for a_dict in desired_tags]
+ if user_tag_keys == desired_keys:
+ for tag in desired_tags:
+ if current_tags[tag['label_key']] != tag['label_value']:
+ return True
+ return False
+ else:
+ return True
+
+ def compare_gcp_labels(self, current_tags, user_tags, is_ha):
+ '''
+ Update user-tag API behaves differently in GCP CVO.
+ It only supports adding gcp_labels and modifying the values of gcp_labels. Removing gcp_label is not allowed.
+ '''
+ # check if any current gcp_labels are going to be removed or not
+ # gcp HA has one extra gcp_label created automatically
+ resp, error = self.user_tag_key_unique(user_tags, 'label_key')
+ if error is not None:
+ return None, error
+ # check if any current key labels are in the desired key labels
+ resp, error = self.current_label_exist(current_tags, user_tags, is_ha)
+ if error is not None:
+ return None, error
+ if self.is_label_value_changed(current_tags, user_tags):
+ return True, None
+ else:
+ # no change
+ return None, None
+
+ def compare_cvo_tags_labels(self, current_tags, user_tags):
+ '''
+ Compare exiting tags/labels and user input tags/labels to see if there is a change
+ gcp_labels: label_key, label_value
+ aws_tag/azure_tag: tag_key, tag_label
+ '''
+ # azure has one extra azure_tag DeployedByOccm created automatically and it cannot be modified.
+ tag_keys = list(current_tags.keys())
+ user_tag_keys = [key for key in tag_keys if key != 'DeployedByOccm']
+ current_len = len(user_tag_keys)
+ resp, error = self.user_tag_key_unique(user_tags, 'tag_key')
+ if error is not None:
+ return None, error
+ if len(user_tags) != current_len:
+ return True, None
+ # Check if tags/labels of desired configuration in current working environment
+ for item in user_tags:
+ if item['tag_key'] in current_tags and item['tag_value'] != current_tags[item['tag_key']]:
+ return True, None
+ elif item['tag_key'] not in current_tags:
+ return True, None
+ return False, None
+
    def is_cvo_tags_changed(self, rest_api, headers, parameters, tag_name):
        '''
        Determine whether the CVO tags/labels need to be updated.

        Tags/labels are optional CVO parameters, so this covers every
        combination of present/absent tags on both the current working
        environment and the desired parameters.
        :param tag_name: one of 'aws_tag', 'azure_tag', 'gcp_labels'
        :return: (change needed (bool) or None, error or None)
        '''
        # get working environment details by working environment ID
        current, error = self.get_working_environment_details(rest_api, headers)
        if error is not None:
            return None, 'Error: Cannot find working environment %s error: %s' % (self.parameters['working_environment_id'], str(error))
        self.set_api_root_path(current, rest_api)
        # compare tags
        # no tags in current cvo: change is needed exactly when the playbook provides some
        if 'userTags' not in current or len(current['userTags']) == 0:
            return tag_name in parameters, None

        if tag_name == 'gcp_labels':
            if tag_name in parameters:
                return self.compare_gcp_labels(current['userTags'], parameters[tag_name], current['isHA'])
            # if both are empty, no need to update
            # Ignore auto generated gcp label in CVO GCP
            # 'count-down', 'gcp_resource_id', and 'partner-platform-serial-number'(HA)
            tag_keys = list(current['userTags'].keys())
            user_tag_keys = [key for key in tag_keys if key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')]
            if not user_tag_keys:
                return False, None
            else:
                # the playbook dropped gcp_labels but user labels still exist;
                # the GCP update API cannot remove labels
                return None, 'Error: Cannot remove current gcp_labels'
        # no tags in input parameters
        if tag_name not in parameters:
            return True, None
        else:
            # has tags in input parameters and existing CVO
            return self.compare_cvo_tags_labels(current['userTags'], parameters[tag_name])
+
+ def get_license_type(self, rest_api, headers, provider, region, instance_type, ontap_version, license_name):
+ # Permutation query example:
+ # aws: /metadata/permutations?region=us-east-1&instance_type=m5.xlarge&version=ONTAP-9.10.1.T1
+ # azure: /metadata/permutations?region=westus&instance_type=Standard_E4s_v3&version=ONTAP-9.10.1.T1.azure
+ # gcp: /metadata/permutations?region=us-east1&instance_type=n2-standard-4&version=ONTAP-9.10.1.T1.gcp
+ # The examples of the ontapVersion in ontapClusterProperties response:
+ # AWS for both single and HA: 9.10.1RC1, 9.8
+ # AZURE single: 9.10.1RC1.T1.azure. For HA: 9.10.1RC1.T1.azureha
+ # GCP for both single and HA: 9.10.1RC1.T1, 9.8.T1
+ # To be used in permutation:
+ # AWS ontap_version format: ONTAP-x.x.x.T1 or ONTAP-x.x.x.T1.ha for Ha
+ # AZURE ontap_version format: ONTAP-x.x.x.T1.azure or ONTAP-x.x.x.T1.azureha for HA
+ # GCP ontap_version format: ONTAP-x.x.x.T1.gcp or ONTAP-x.x.x.T1.gcpha for HA
+ version = 'ONTAP-' + ontap_version
+ if provider == 'aws':
+ version += '.T1.ha' if self.parameters['is_ha'] else '.T1'
+ elif provider == 'gcp':
+ version += '.T1' if not ontap_version.endswith('T1') else ''
+ version += '.gcpha' if self.parameters['is_ha'] else '.gcp'
+ api = '%s/metadata/permutations' % rest_api.api_root_path
+ params = {'region': region,
+ 'version': version,
+ 'instance_type': instance_type
+ }
+ response, error, dummy = rest_api.get(api, params=params, header=headers)
+ if error:
+ return None, "Error: get_license_type %s %s" % (response, error)
+ for item in response:
+ if item['license']['name'] == license_name:
+ return item['license']['type'], None
+
+ return None, "Error: get_license_type cannot get license type %s" % response
+
+ def get_modify_cvo_params(self, rest_api, headers, desired, provider):
+ modified = []
+ if desired['update_svm_password']:
+ modified = ['svm_password']
+ # Get current working environment property
+ properties = ['status', 'ontapClusterProperties.fields(upgradeVersions)']
+ # instanceType in aws case is stored in awsProperties['instances'][0]['instanceType']
+ if provider == 'aws':
+ properties.append('awsProperties')
+ else:
+ properties.append('providerProperties')
+
+ we, err = self.get_working_environment_property(rest_api, headers, properties)
+
+ if err is not None:
+ return None, err
+
+ if we['status'] is None or we['status']['status'] != 'ON':
+ return None, "Error: get_modify_cvo_params working environment %s status is not ON. Operation cannot be performed." % we['publicId']
+
+ tier_level = None
+ if we['ontapClusterProperties']['capacityTierInfo'] is not None:
+ tier_level = we['ontapClusterProperties']['capacityTierInfo']['tierLevel']
+
+ # collect changed attributes
+ if tier_level is not None and tier_level != desired['tier_level']:
+ if provider == 'azure':
+ if desired['capacity_tier'] == 'Blob':
+ modified.append('tier_level')
+ elif provider == 'aws':
+ if desired['capacity_tier'] == 'S3':
+ modified.append('tier_level')
+ elif provider == 'gcp':
+ if desired['capacity_tier'] == 'cloudStorage':
+ modified.append('tier_level')
+
+ if 'svm_name' in desired and we['svmName'] != desired['svm_name']:
+ modified.append('svm_name')
+
+ if 'writing_speed_state' in desired:
+ if we['ontapClusterProperties']['writingSpeedState'] != desired['writing_speed_state'].upper():
+ modified.append('writing_speed_state')
+
+ if provider == 'aws':
+ current_instance_type = we['awsProperties']['instances'][0]['instanceType']
+ region = we['awsProperties']['regionName']
+ else:
+ current_instance_type = we['providerProperties']['instanceType']
+ region = we['providerProperties']['regionName']
+
+ if current_instance_type != desired['instance_type']:
+ modified.append('instance_type')
+
+ # check if license type is changed
+ current_license_type, error = self.get_license_type(rest_api, headers, provider, region, current_instance_type,
+ we['ontapClusterProperties']['ontapVersion'],
+ we['ontapClusterProperties']['licenseType']['name'])
+ if err is not None:
+ return None, error
+ if current_license_type != desired['license_type']:
+ modified.append('license_type')
+
+ if desired['upgrade_ontap_version'] is True:
+ if desired['use_latest_version'] or desired['ontap_version'] == 'latest':
+ return None, "Error: To upgrade ONTAP image, the ontap_version must be a specific version"
+ current_version = 'ONTAP-' + we['ontapClusterProperties']['ontapVersion']
+ if not desired['ontap_version'].startswith(current_version):
+ if we['ontapClusterProperties']['upgradeVersions'] is not None:
+ available_versions = []
+ for image_info in we['ontapClusterProperties']['upgradeVersions']:
+ available_versions.append(image_info['imageVersion'])
+ # AWS ontap_version format: ONTAP-x.x.x.Tx or ONTAP-x.x.x.Tx.ha for Ha
+ # AZURE ontap_version format: ONTAP-x.x.x.Tx.azure or .azureha for HA
+ # GCP ontap_version format: ONTAP-x.x.x.Tx.gcp or .gcpha for HA
+ # Tx is not relevant for ONTAP version. But it is needed for the CVO creation
+ # upgradeVersion imageVersion format: ONTAP-x.x.x
+ if desired['ontap_version'].startswith(image_info['imageVersion']):
+ modified.append('ontap_version')
+ break
+ else:
+ return None, "Error: No ONTAP image available for version %s. Available versions: %s" % (desired['ontap_version'], available_versions)
+
+ tag_name = {
+ 'aws': 'aws_tag',
+ 'azure': 'azure_tag',
+ 'gcp': 'gcp_labels'
+ }
+
+ need_change, error = self.is_cvo_tags_changed(rest_api, headers, desired, tag_name[provider])
+ if error is not None:
+ return None, error
+ if need_change:
+ modified.append(tag_name[provider])
+
+ # The updates of followings are not supported. Will response failure.
+ for key, value in desired.items():
+ if key == 'project_id' and we['providerProperties']['projectName'] != value:
+ modified.append('project_id')
+ if key == 'zone' and we['providerProperties']['zoneName'][0] != value:
+ modified.append('zone')
+ if key == 'cidr' and we['providerProperties']['vnetCidr'] != value:
+ modified.append('cidr')
+ if key == 'location' and we['providerProperties']['regionName'] != value:
+ modified.append('location')
+ if key == 'availability_zone' and we['providerProperties']['availabilityZone'] != value:
+ modified.append('availability_zone')
+
+ if modified:
+ self.changed = True
+ return modified, None
+
+ def is_cvo_update_needed(self, rest_api, headers, parameters, changeable_params, provider):
+ modify, error = self.get_modify_cvo_params(rest_api, headers, parameters, provider)
+ if error is not None:
+ return None, error
+ unmodifiable = [attr for attr in modify if attr not in changeable_params]
+ if unmodifiable:
+ return None, "%s cannot be modified." % str(unmodifiable)
+
+ return modify, None
+
+ def wait_cvo_update_complete(self, rest_api, headers):
+ retry_count = 65
+ if self.parameters['is_ha'] is True:
+ retry_count *= 2
+ for count in range(retry_count):
+ # get CVO status
+ we, err = self.get_working_environment_property(rest_api, headers, ['status'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ if we['status']['status'] != "UPDATING":
+ return True, None
+ time.sleep(60)
+
+ return False, 'Error: Taking too long for CVO to be active after update or not properly setup'
+
+ def update_cvo_tags(self, api_root, rest_api, headers, tag_name, tag_list):
+ body = {}
+ tags = []
+ if tag_list is not None:
+ for tag in tag_list:
+ atag = {
+ 'tagKey': tag['label_key'] if tag_name == "gcp_labels" else tag['tag_key'],
+ 'tagValue': tag['label_value'] if tag_name == "gcp_labels" else tag['tag_value']
+ }
+ tags.append(atag)
+ body['tags'] = tags
+
+ response, err, dummy = rest_api.put(api_root + "user-tags", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modifying tags: %s, %s' % (str(err), str(response))
+
+ return True, None
+
+ def update_svm_password(self, api_root, rest_api, headers, svm_password):
+ body = {'password': svm_password}
+ response, err, dummy = rest_api.put(api_root + "set-password", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modifying svm_password: %s, %s' % (str(err), str(response))
+
+ return True, None
+
+ def update_svm_name(self, api_root, rest_api, headers, svm_name):
+ # get current svmName
+ we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ body = {'svmNewName': svm_name,
+ 'svmName': we['svmName']}
+ response, err, dummy = rest_api.put(api_root + "svm", body, header=headers)
+ if err is not None:
+ return False, "update svm_name error"
+ return True, None
+
+ def update_tier_level(self, api_root, rest_api, headers, tier_level):
+ body = {'level': tier_level}
+ response, err, dummy = rest_api.post(api_root + "change-tier-level", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modify tier_level: %s, %s' % (str(err), str(response))
+
+ return True, None
+
+ def update_writing_speed_state(self, api_root, rest_api, headers, writing_speed_state):
+ body = {'writingSpeedState': writing_speed_state.upper()}
+ response, err, dummy = rest_api.put(api_root + "writing-speed", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modify writing_speed_state: %s, %s' % (str(err), str(response))
+ # check upgrade status
+ dummy, err = self.wait_cvo_update_complete(rest_api, headers)
+ return err is None, err
+
+ def update_instance_license_type(self, api_root, rest_api, headers, instance_type, license_type):
+ body = {'instanceType': instance_type,
+ 'licenseType': license_type}
+ response, err, dummy = rest_api.put(api_root + "license-instance-type", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modify instance_type and license_type: %s, %s' % (str(err), str(response))
+ # check upgrade status
+ dummy, err = self.wait_cvo_update_complete(rest_api, headers)
+ return err is None, err
+
+ def set_config_flag(self, rest_api, headers):
+ body = {'value': True, 'valueType': 'BOOLEAN'}
+ base_url = '/occm/api/occm/config/skip-eligibility-paygo-upgrade'
+ response, err, dummy = rest_api.put(base_url, body, header=headers)
+ if err is not None:
+ return False, "set_config_flag error"
+
+ return True, None
+
+ def do_ontap_image_upgrade(self, rest_api, headers, desired):
+ # get ONTAP image version
+ we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ body = {'updateType': "OCCM_PROVIDED"}
+ for image_info in we['ontapClusterProperties']['upgradeVersions']:
+ if image_info['imageVersion'] in desired:
+ body['updateParameter'] = image_info['imageVersion']
+ break
+ # upgrade
+ base_url = "%s/working-environments/%s/update-image" % (rest_api.api_root_path, self.parameters['working_environment_id'])
+ response, err, dummy = rest_api.post(base_url, body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on do_ontap_image_upgrade: %s, %s' % (str(err), str(response))
+ else:
+ return True, None
+
+ def wait_ontap_image_upgrade_complete(self, rest_api, headers, desired):
+ retry_count = 65
+ if self.parameters['is_ha'] is True:
+ retry_count *= 2
+ for count in range(retry_count):
+ # get CVO status
+ we, err = self.get_working_environment_property(rest_api, headers, ['status', 'ontapClusterProperties'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ if we['status']['status'] != "UPDATING" and we['ontapClusterProperties']['ontapVersion'] != "":
+ if we['ontapClusterProperties']['ontapVersion'] in desired:
+ return True, None
+ time.sleep(60)
+
+ return False, 'Error: Taking too long for CVO to be active or not properly setup'
+
+ def upgrade_ontap_image(self, rest_api, headers, desired):
+ # set flag
+ dummy, err = self.set_config_flag(rest_api, headers)
+ if err is not None:
+ return False, err
+ # upgrade
+ dummy, err = self.do_ontap_image_upgrade(rest_api, headers, desired)
+ if err is not None:
+ return False, err
+ # check upgrade status
+ dummy, err = self.wait_ontap_image_upgrade_complete(rest_api, headers, desired)
+ return err is None, err
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py
new file mode 100644
index 000000000..9533d5f91
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_aggregate
+short_description: NetApp Cloud Manager Aggregate
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, Modify or Delete Aggregate on Cloud Manager.
+
+options:
+ state:
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of the new aggregate.
+ required: true
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the aggregate will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the aggregate will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ number_of_disks:
+ description:
+ - The required number of disks in the new aggregate.
+ type: int
+
+ disk_size_size:
+ description:
+ - The required size of the disks.
+ type: int
+
+ disk_size_unit:
+ description:
+ - The disk size unit ['GB' or 'TB']. The default is 'TB'.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ home_node:
+ description:
+ - The home node that the new aggregate should belong to.
+ type: str
+
+ provider_volume_type:
+ description:
+ - The cloud provider volume type.
+ type: str
+
+ capacity_tier:
+ description:
+ - The aggregate's capacity tier for tiering cold data to object storage.
+ - If the value is NONE, the capacity_tier will not be set on aggregate creation.
+ choices: [ 'NONE', 'S3', 'Blob', 'cloudStorage']
+ type: str
+
+ iops:
+ description:
+ - Provisioned IOPS. Needed only when providerVolumeType is "io1".
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create Aggregate
+ netapp.cloudmanager.na_cloudmanager_aggregate:
+ state: present
+ name: AnsibleAggregate
+ working_environment_name: testAWS
+ client_id: "{{ client_id }}"
+ number_of_disks: 2
+ refresh_token: xxx
+
+- name: Delete Volume
+ netapp.cloudmanager.na_cloudmanager_aggregate:
+ state: absent
+ name: AnsibleAggregate
+ working_environment_name: testAWS
+ client_id: "{{ client_id }}"
+ refresh_token: xxx
+'''
+
+RETURN = '''
+msg:
+ description: Success message.
+ returned: success
+ type: str
+ sample: "Aggregate Created"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudmanagerAggregate(object):
    '''
    Manage (create, grow, delete) an aggregate on a Cloud Manager working
    environment.

    Contains methods to parse arguments,
    derive details of CloudmanagerAggregate objects
    and send requests to CloudmanagerAggregate via
    the restApi
    '''

    def __init__(self):
        '''
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        '''
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            working_environment_name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            number_of_disks=dict(required=False, type='int'),
            disk_size_size=dict(required=False, type='int'),
            disk_size_unit=dict(required=False, choices=['GB', 'TB'], default='TB'),
            home_node=dict(required=False, type='str'),
            provider_volume_type=dict(required=False, type='str'),
            capacity_tier=dict(required=False, choices=['NONE', 'S3', 'Blob', 'cloudStorage'], type='str'),
            iops=dict(required=False, type='int'),
            throughput=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['refresh_token', 'sa_client_id'],
                ['working_environment_name', 'working_environment_id'],
            ],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # gp3 volumes require both iops and throughput; io1 requires iops only.
            required_if=[
                ['provider_volume_type', 'gp3', ['iops', 'throughput']],
                ['provider_volume_type', 'io1', ['iops']],
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # root path depends on the cloud provider; resolved later in get_aggregate().
        self.rest_api.api_root_path = None
        # every request is routed through the given connector agent
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_aggregate(self):
        '''
        Get aggregate details.

        Resolves the working environment (by id or by name), stores its
        publicId in self.parameters['working_environment_id'], sets the
        provider-specific API root path, then looks up the aggregate by name.

        :return: the matching aggregate dict, or None if not found.
        '''
        working_environment_detail = None
        if 'working_environment_id' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        elif 'working_environment_name' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                       self.headers,
                                                                                                       self.parameters['working_environment_name'])
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        else:
            self.module.fail_json(msg="Error: Missing working environment information")
        if working_environment_detail is not None:
            self.parameters['working_environment_id'] = working_environment_detail['publicId']
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
        api_root_path = self.rest_api.api_root_path

        # Non-Amazon providers take the working environment id as a path
        # segment; Amazon takes it as a query parameter.
        if working_environment_detail['cloudProviderName'] != "Amazon":
            api = '%s/aggregates/%s' % (api_root_path, working_environment_detail['publicId'])
        else:
            api = '%s/aggregates?workingEnvironmentId=%s' % (api_root_path, working_environment_detail['publicId'])
        response, error, dummy = self.rest_api.get(api, header=self.headers)
        if error:
            self.module.fail_json(msg="Error: Failed to get aggregate list: %s, %s" % (str(error), str(response)))
        # scan the list for an exact name match
        for aggr in response:
            if aggr['name'] == self.parameters['name']:
                return aggr
        return None

    def create_aggregate(self):
        '''
        Create aggregate.

        Builds the request body from mandatory parameters (name, working
        environment, disk count/size) plus any optional ones that were
        supplied, and POSTs it.  Fails the module on any API error.
        '''
        api = '%s/aggregates' % self.rest_api.api_root_path
        # check if all the required parameters exist
        body = {
            'name': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': self.parameters['number_of_disks'],
            'diskSize': {'size': self.parameters['disk_size_size'],
                         'unit': self.parameters['disk_size_unit']},
        }
        # optional parameters
        if 'home_node' in self.parameters:
            body['homeNode'] = self.parameters['home_node']
        if 'provider_volume_type' in self.parameters:
            body['providerVolumeType'] = self.parameters['provider_volume_type']
        # capacity_tier 'NONE' means: do not send a capacityTier at all
        if 'capacity_tier' in self.parameters and self.parameters['capacity_tier'] != "NONE":
            body['capacityTier'] = self.parameters['capacity_tier']
        if 'iops' in self.parameters:
            body['iops'] = self.parameters['iops']
        if 'throughput' in self.parameters:
            body['throughput'] = self.parameters['throughput']
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate creation: %s, %s" % (str(error), str(response)))

    def update_aggregate(self, add_number_of_disks):
        '''
        Update aggregate with aggregate name and the parameters number_of_disks will be added.

        :param add_number_of_disks: how many disks to add to the existing aggregate.
        '''
        api = '%s/aggregates/%s/%s/disks' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                             self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': add_number_of_disks
        }
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate adding disks: %s, %s" % (str(error), str(response)))

    def delete_aggregate(self):
        '''
        Delete aggregate with aggregate name.
        '''
        api = '%s/aggregates/%s/%s' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                       self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
        }
        response, error, dummy = self.rest_api.delete(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate deletion: %s, %s" % (str(error), str(response)))

    def apply(self):
        '''
        Check, process and initiate aggregate operation.

        Determines create/delete from current state vs. desired state, and
        treats a higher number_of_disks on an existing aggregate as a
        grow (add-disks) operation.  Shrinking is rejected.  Honors
        check_mode: no REST mutation is issued when it is set.
        '''
        # check if aggregate exists
        current = self.get_aggregate()
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed:
            action = cd_action + "_aggregate"
            # validate that all parameters required for this action are present
            have_all_required, missed_params = self.na_helper.have_required_parameters(action)
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on %s" % (str(missed_params), action))
        add_disks = 0
        if current and self.parameters['state'] != 'absent':
            have_all_required, missed_params = self.na_helper.have_required_parameters("update_aggregate")
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on update_aggregate" % str(missed_params))
            # only growing is supported: compute how many disks to add
            if len(current['disks']) < self.parameters['number_of_disks']:
                add_disks = self.parameters['number_of_disks'] - len(current['disks'])
                self.na_helper.changed = True
            elif len(current['disks']) > self.parameters['number_of_disks']:
                self.module.fail_json(msg="Error: Only add disk support. number_of_disks cannot be reduced")

        result_message = ""
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                self.create_aggregate()
                result_message = "Aggregate Created"
            elif cd_action == "delete":
                self.delete_aggregate()
                result_message = "Aggregate Deleted"
            else:  # modify
                self.update_aggregate(add_disks)
                result_message = "Aggregate Updated"
        self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
+
+
def main():
    '''
    Entry point: build the aggregate module object and apply the requested state.
    :return: None
    '''
    NetAppCloudmanagerAggregate().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py
new file mode 100644
index 000000000..8e757b989
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_aws_fsx
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_aws_fsx
+short_description: Cloud ONTAP file system(FSx) in AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.13.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete CVO/Working Environment for AWS FSx.
+
+options:
+
+ state:
+ description:
+ - Whether the specified FSx in AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the CVO/Working Environment for AWS FSx to manage.
+ type: str
+
+ region:
+ description:
+ - The region where the working environment will be created.
+ type: str
+
+ aws_credentials_name:
+ description:
+ - The name of the AWS Credentials account name.
+ type: str
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace of working environment.
+ type: str
+
+ tenant_id:
+ required: true
+ description:
+ - The NetApp account ID that the File System will be associated with.
+ type: str
+
+ working_environment_id:
+ description:
+ - The ID of the AWS FSx working environment used for delete.
+ type: str
+
+ storage_capacity_size:
+ description:
+ - volume size for the first data aggregate.
+ - For GB, the value can be [100 or 500].
+ - For TB, the value can be [1,2,4,8,16].
+ type: int
+
+ storage_capacity_size_unit:
+ description:
+ - The unit for volume size.
+ choices: ['GiB', 'TiB']
+ type: str
+
+ fsx_admin_password:
+ description:
+ - The admin password for Cloud Volumes ONTAP fsxadmin user.
+ type: str
+
+ throughput_capacity:
+ description:
+ - The capacity of the throughput.
+ choices: [512, 1024, 2048]
+ type: int
+
+ security_group_ids:
+ description:
+ - The IDs of the security groups for the working environment, multiple security groups can be provided separated by ','.
+ type: list
+ elements: str
+
+ kms_key_id:
+ description:
+ - AWS encryption parameters. It is required if using aws encryption.
+ type: str
+
+ tags:
+ description:
+ - Additional tags for the FSx AWS working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+ primary_subnet_id:
+ description:
+ - The subnet ID of the first node.
+ type: str
+
+ secondary_subnet_id:
+ description:
+ - The subnet ID of the second node.
+ type: str
+
+ route_table_ids:
+ description:
+ - The list of route table IDs that will be updated with the floating IPs.
+ type: list
+ elements: str
+
+ minimum_ssd_iops:
+ description:
+ - Provisioned SSD IOPS.
+ type: int
+
+ endpoint_ip_address_range:
+ description:
+ - The endpoint IP address range.
+ type: str
+
+ import_file_system:
+ description:
+ - bool option to existing import AWS file system to CloudManager.
+ type: bool
+ default: false
+ version_added: 21.17.0
+
+ file_system_id:
+ description:
+ - The AWS file system ID to import to CloudManager. Required when import_file_system is 'True'
+ type: str
+ version_added: 21.17.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp AWS FSx
+ netapp.cloudmanager.na_cloudmanager_aws_fsx:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: fsxAnsible
+ region: us-east-2
+ workspace_id: workspace-xxxxx
+ tenant_id: account-xxxxx
+ storage_capacity_size: 1024
+ storage_capacity_size_unit: TiB
+ aws_credentials_name: xxxxxxx
+ primary_subnet_id: subnet-xxxxxx
+ secondary_subnet_id: subnet-xxxxx
+ throughput_capacity: 512
+ fsx_admin_password: xxxxxxx
+ tags: [
+ {tag_key: abcd,
+ tag_value: ABCD}]
+
+- name: Import AWS FSX
+ na_cloudmanager_aws_fsx:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: fsxAnsible
+ region: us-west-2
+ workspace_id: workspace-xxxxx
+ import_file_system: True
+ file_system_id: "{{ xxxxxxxxxxxxxxx }}"
+ tenant_id: account-xxxxx
+ aws_credentials_name: xxxxxxx
+
+- name: Delete NetApp AWS FSx
+ netapp.cloudmanager.na_cloudmanager_aws_fsx:
+ state: absent
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ working_environment_id: fs-xxxxxx
+ name: fsxAnsible
+ tenant_id: account-xxxxx
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AWS FSx working_environment_id.
+ type: str
+ returned: success
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudManagerAWSFSX:
    ''' Create, import or delete an FSx for NetApp ONTAP working environment in AWS. '''

    def __init__(self):
        """Parse arguments, build the REST client, and resolve AWS credentials."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            region=dict(required=False, type='str'),
            aws_credentials_name=dict(required=False, type='str'),
            workspace_id=dict(required=False, type='str'),
            tenant_id=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            storage_capacity_size=dict(required=False, type='int'),
            storage_capacity_size_unit=dict(required=False, type='str', choices=['GiB', 'TiB']),
            fsx_admin_password=dict(required=False, type='str', no_log=True),
            throughput_capacity=dict(required=False, type='int', choices=[512, 1024, 2048]),
            security_group_ids=dict(required=False, type='list', elements='str'),
            kms_key_id=dict(required=False, type='str', no_log=True),
            tags=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            primary_subnet_id=dict(required=False, type='str'),
            secondary_subnet_id=dict(required=False, type='str'),
            route_table_ids=dict(required=False, type='list', elements='str'),
            minimum_ssd_iops=dict(required=False, type='int'),
            endpoint_ip_address_range=dict(required=False, type='str'),
            import_file_system=dict(required=False, type='bool', default=False),
            file_system_id=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'present', ['region', 'aws_credentials_name', 'workspace_id', 'fsx_admin_password', 'throughput_capacity',
                                      'primary_subnet_id', 'secondary_subnet_id', 'storage_capacity_size', 'storage_capacity_size_unit']],
                ['import_file_system', True, ['file_system_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key'], ['storage_capacity_size', 'storage_capacity_size_unit']],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # headers are only needed when talking to the simulator backend
        self.headers = None
        if self.rest_api.simulator:
            self.headers = {
                'x-simulator': 'true'
            }
        # credentials are only required (and resolved) for state=present
        if self.parameters['state'] == 'present':
            self.aws_credentials_id, error = self.get_aws_credentials_id()
            if error is not None:
                self.module.fail_json(msg=str(error))

    def get_aws_credentials_id(self):
        """
        Get aws_credentials_id
        :return: (AWS Credentials ID, None) on success, or (value, error message) on failure.
        """
        api = "/fsx-ontap/aws-credentials/"
        api += self.parameters['tenant_id']
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            # NOTE(review): on failure the first element is the raw response, not an
            # ID; callers must check the error element before using the first one.
            return response, "Error: getting aws_credentials_id %s" % error
        # match the credentials entry by its display name
        for each in response:
            if each['name'] == self.parameters['aws_credentials_name']:
                return each['id'], None
        return None, "Error: aws_credentials_name not found"

    def discover_aws_fsx(self):
        """
        Discover existing FSx file systems and verify file_system_id is among them.

        :return: an error message string on failure, or None (implicitly) on success.
        """
        api = "/fsx-ontap/working-environments/%s/discover?credentials-id=%s&workspace-id=%s&region=%s"\
              % (self.parameters['tenant_id'], self.aws_credentials_id, self.parameters['workspace_id'], self.parameters['region'])
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            return "Error: discovering aws_fsx %s" % error
        id_found = False
        for each in response:
            if each['id'] == self.parameters['file_system_id']:
                id_found = True
                break
        if not id_found:
            return "Error: file_system_id provided could not be found"

    def recover_aws_fsx(self):
        """
        Import (recover) an existing AWS FSx file system into Cloud Manager.
        Fails the module on any API error.
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "fileSystemId": self.parameters['file_system_id'],
                }
        api_url = "/fsx-ontap/working-environments/%s/recover" % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on recovering AWS FSx: %s, %s" % (error, response))

    def create_aws_fsx(self):
        """ Create AWS FSx working environment and wait until it is available.

        :return: the new working environment id.
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "throughputCapacity": self.parameters['throughput_capacity'],
                "storageCapacity": {
                    "size": self.parameters['storage_capacity_size'],
                    "unit": self.parameters['storage_capacity_size_unit']},
                "fsxAdminPassword": self.parameters['fsx_admin_password'],
                "primarySubnetId": self.parameters['primary_subnet_id'],
                "secondarySubnetId": self.parameters['secondary_subnet_id'],
                }

        # translate module tag dicts into the API's key/value shape
        if self.parameters.get('tags') is not None:
            tags = []
            for each_tag in self.parameters['tags']:
                tag = {
                    'key': each_tag['tag_key'],
                    'value': each_tag['tag_value']
                }

                tags.append(tag)
            json.update({"tags": tags})

        if self.parameters.get('security_group_ids'):
            json.update({"securityGroupIds": self.parameters['security_group_ids']})

        if self.parameters.get('route_table_ids'):
            json.update({"routeTableIds": self.parameters['route_table_ids']})

        if self.parameters.get('kms_key_id'):
            json.update({"kmsKeyId": self.parameters['kms_key_id']})

        if self.parameters.get('minimum_ssd_iops'):
            json.update({"minimumSsdIops": self.parameters['minimum_ssd_iops']})

        if self.parameters.get('endpoint_ip_address_range'):
            json.update({"endpointIpAddressRange": self.parameters['endpoint_ip_address_range']})

        api_url = '/fsx-ontap/working-environments/%s' % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating AWS FSx: %s, %s" % (str(error), str(response)))
        working_environment_id = response['id']
        # poll every 30s, up to 30 times, for the file system to come online
        creation_wait_time = 30
        creation_retry_count = 30
        wait_on_completion_api_url = '/fsx-ontap/working-environments/%s/%s?provider-details=true' % (self.parameters['tenant_id'], working_environment_id)

        err = self.wait_on_completion_for_fsx(wait_on_completion_api_url, "AWS_FSX", "create", creation_retry_count, creation_wait_time)

        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating AWS FSX: %s" % str(err))

        return working_environment_id

    def wait_on_completion_for_fsx(self, api_url, action_name, task, retries, wait_interval):
        """Poll the FSx provider status until it is available, failed, or retries run out.

        :return: None on success, or an error message string.
        """
        while True:
            fsx_status, error = self.check_task_status_for_fsx(api_url)
            if error is not None:
                return error
            if fsx_status['status']['status'] == "ON" and fsx_status['status']['lifecycle'] == "AVAILABLE":
                return None
            elif fsx_status['status']['status'] == "FAILED":
                return 'Failed to %s %s' % (task, action_name)
            if retries == 0:
                return 'Taking too long for %s to %s or not properly setup' % (action_name, task)
            time.sleep(wait_interval)
            retries = retries - 1

    def check_task_status_for_fsx(self, api_url):
        """Fetch providerDetails for the FSx working environment.

        Network errors are retried up to 3 times with exponential backoff
        (1s, 2s, 4s) before giving up.

        :return: (providerDetails dict, None) on success; (0, error) on failure.
        """
        network_retries = 3
        exponential_retry_time = 1
        while True:
            result, error, dummy = self.rest_api.get(api_url, None, header=self.headers)
            if error is not None:
                if network_retries > 0:
                    time.sleep(exponential_retry_time)
                    exponential_retry_time *= 2
                    network_retries = network_retries - 1
                else:
                    # NOTE(review): 0 is a sentinel first element here, unlike the
                    # dict returned on success; callers only use it after checking error.
                    return 0, error
            else:
                response = result
                break
        return response['providerDetails'], None

    def delete_aws_fsx(self, id, tenant_id):
        """
        Delete AWS FSx

        :param id: the working environment id to delete.
            NOTE(review): parameter name shadows the builtin ``id``.
        :param tenant_id: the NetApp account the file system belongs to.
        """
        api_url = '/fsx-ontap/working-environments/%s/%s' % (tenant_id, id)
        response, error, dummy = self.rest_api.delete(api_url, None, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on deleting AWS FSx: %s, %s" % (str(error), str(response)))

    def apply(self):
        """
        Apply action to the AWS FSx working Environment.

        Resolves the current state, decides between create / import / delete,
        and honors check_mode (no mutation when set).
        :return: None
        """
        working_environment_id = None
        current, error = self.na_helper.get_aws_fsx_details(self.rest_api, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on fetching AWS FSx: %s" % str(error))
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # a "create" with import_file_system becomes an import of an existing FSx
        if self.parameters['import_file_system'] and cd_action == "create":
            error = self.discover_aws_fsx()
            if error is not None:
                self.module.fail_json(msg="Error: unexpected response on discovering AWS FSx: %s" % str(error))
            cd_action = "import"
            self.na_helper.changed = True

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "import":
                self.recover_aws_fsx()
                working_environment_id = self.parameters['file_system_id']
            elif cd_action == "create":
                working_environment_id = self.create_aws_fsx()
            elif cd_action == "delete":
                self.delete_aws_fsx(current['id'], self.parameters['tenant_id'])

        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """
    Entry point: build the AWS FSx module object and apply the requested state.
    :return: None
    """
    NetAppCloudManagerAWSFSX().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py
new file mode 100644
index 000000000..89e10a81b
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cifs_server
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cifs_server
+short_description: NetApp Cloud Manager cifs server
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or Delete a CIFS server on the Cloud Volume ONTAP system to support CIFS volumes, based on an Active Directory or Workgroup.
+
+options:
+ state:
+ description:
+ - Whether the specified cifs server should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the cifs server will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the cifs server will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - The active directory domain name. For CIFS AD only.
+ type: str
+
+ dns_domain:
+ description:
+ - The DNS domain name. For CIFS AD only.
+ type: str
+
+ username:
+ description:
+ - The active directory admin user name. For CIFS AD only.
+ type: str
+
+ password:
+ description:
+ - The active directory admin password. For CIFS AD only.
+ type: str
+
+ ip_addresses:
+ description:
+ - The DNS server IP addresses. For CIFS AD only.
+ type: list
+ elements: str
+
+ netbios:
+ description:
+ - The CIFS server NetBIOS name. For CIFS AD only.
+ type: str
+
+ organizational_unit:
+ description:
+ - The organizational unit in which to register the CIFS server. For CIFS AD only.
+ type: str
+
+ is_workgroup:
+ description:
+ - For CIFS workgroup operations, set to true.
+ type: bool
+
+ server_name:
+ description:
+ - The server name. For CIFS workgroup only.
+ type: str
+
+ workgroup_name:
+ description:
+ - The workgroup name. For CIFS workgroup only.
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create cifs server with working_environment_id
+ netapp.cloudmanager.na_cloudmanager_cifs_server:
+ state: present
+ working_environment_id: VsaWorkingEnvironment-abcdefgh
+ client_id: your_client_id
+ refresh_token: your_refresh_token
+ domain: example.com
+ username: admin
+ password: pass
+ dns_domain: example.com
+ ip_addresses: ["1.0.0.0"]
+ netbios: cvoname
+ organizational_unit: CN=Computers
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppCloudmanagerCifsServer:
    """Create or delete a CIFS server (Active Directory or workgroup) on a CVO system."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            working_environment_id=dict(required=False, type='str'),
            working_environment_name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            domain=dict(required=False, type='str'),
            dns_domain=dict(required=False, type='str'),
            username=dict(required=False, type='str'),
            password=dict(required=False, type='str', no_log=True),
            ip_addresses=dict(required=False, type='list', elements='str'),
            netbios=dict(required=False, type='str'),
            organizational_unit=dict(required=False, type='str'),
            is_workgroup=dict(required=False, type='bool'),
            server_name=dict(required=False, type='str'),
            workgroup_name=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['refresh_token', 'sa_client_id'],
                ['working_environment_name', 'working_environment_id'],
            ],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # AD-only options cannot be combined with workgroup-only options
            mutually_exclusive=[
                ('domain', 'server_name'),
                ('dns_domain', 'server_name'),
                ('username', 'server_name'),
                ('password', 'server_name'),
                ('ip_addresses', 'server_name'),
                ('netbios', 'server_name'),
                ('organizational_unit', 'server_name'),
                ('domain', 'workgroup_name'),
                ('dns_domain', 'workgroup_name'),
                ('username', 'workgroup_name'),
                ('password', 'workgroup_name'),
                ('ip_addresses', 'workgroup_name'),
                ('netbios', 'workgroup_name'),
                ('organizational_unit', 'workgroup_name'),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # every request is routed through the given connector agent
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
        # resolve the working environment up front (by id, else by name)
        if self.parameters.get('working_environment_id'):
            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
        else:
            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                       self.headers,
                                                                                                       self.parameters['working_environment_name'])
        if working_environment_detail is not None:
            self.parameters['working_environment_id'] = working_environment_detail['publicId']
        else:
            self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)

    def get_cifs_server(self):
        """Fetch the CIFS server config for the working environment.

        :return: a dict of module-style keys normalized from the API response,
                 or None when no CIFS server is configured.
        """
        response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s/cifs" % (
            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error on get_cifs_server: %s, %s" % (str(err), str(response)))
        current_cifs = dict()
        if response is None or len(response) == 0:
            return None
        # only one cifs server exists per working environment.
        for server in response:
            # map API field names back to module parameter names
            if server.get('activeDirectoryDomain'):
                current_cifs['domain'] = server['activeDirectoryDomain']
            if server.get('dnsDomain'):
                current_cifs['dns_domain'] = server['dnsDomain']
            if server.get('ipAddresses'):
                current_cifs['ip_addresses'] = server['ipAddresses']
            if server.get('organizationalUnit'):
                current_cifs['organizational_unit'] = server['organizationalUnit']
            if server.get('netBIOS'):
                current_cifs['netbios'] = server['netBIOS']
        return current_cifs

    def create_cifs_server(self):
        """Create the CIFS server (AD mode, or workgroup mode when is_workgroup is set).

        Module parameters are converted to camelCase API fields; AD-specific
        parameters are renamed explicitly since their API names differ.
        """
        exclude_list = ['client_id', 'domain', 'netbios', 'username', 'password']
        server = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
        if self.parameters.get('domain'):
            server['activeDirectoryDomain'] = self.parameters['domain']
        if self.parameters.get('netbios'):
            server['netBIOS'] = self.parameters['netbios']
        if self.parameters.get('username'):
            server['activeDirectoryUsername'] = self.parameters['username']
        if self.parameters.get('password'):
            server['activeDirectoryPassword'] = self.parameters['password']
        url = "%s/working-environments/%s/cifs" % (self.rest_api.api_root_path,
                                                   self.parameters['working_environment_id'])
        # workgroup mode posts to the .../cifs-workgroup endpoint instead
        if self.parameters.get('is_workgroup'):
            url = url + "-workgroup"

        response, err, dummy = self.rest_api.send_request("POST", url, None, server, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error on create_cifs_server failed: %s, %s" % (str(err), str(response)))

    def delete_cifs_server(self):
        """Delete the CIFS server via the delete-cifs action endpoint (POST, empty body)."""
        response, err, dummy = self.rest_api.send_request("POST", "%s/working-environments/%s/delete-cifs" % (
            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, {}, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error on delete_cifs_server: %s, %s" % (str(err), str(response)))

    def apply(self):
        """Determine create/delete from current vs. desired state and execute it (honors check_mode)."""
        current = self.get_cifs_server()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_cifs_server()
            elif cd_action == 'delete':
                self.delete_cifs_server()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    '''Entry point: build the CIFS server module object and apply the requested state.'''
    NetAppCloudmanagerCifsServer().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py
new file mode 100644
index 000000000..b1a22829e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py
@@ -0,0 +1,655 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_aws
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_aws
+short_description: NetApp Cloud Manager connector for AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete Cloud Manager connector for AWS.
+  - This module requires you to be authenticated with AWS. This can be done with C(aws configure).
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for AWS to manage.
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance (for example, t3.xlarge). At least 4 CPU and 16 GB of memory are required.
+ type: str
+ default: t3.xlarge
+
+ key_name:
+ description:
+ - The name of the key pair to use for the Connector instance.
+ type: str
+
+ subnet_id:
+ description:
+ - The ID of the subnet for the instance.
+ type: str
+
+ region:
+ required: true
+ description:
+ - The region where the Cloud Manager Connector will be created.
+ type: str
+
+ instance_id:
+ description:
+ - The ID of the EC2 instance used for delete.
+ type: str
+
+ client_id:
+ description:
+ - The unique client ID of the Connector.
+ - The connector ID.
+ type: str
+
+ ami:
+ description:
+ - The image ID.
+ type: str
+
+ company:
+ description:
+ - The name of the company of the user.
+ type: str
+
+ security_group_ids:
+ description:
+ - The IDs of the security groups for the instance, multiple security groups can be provided separated by ','.
+ type: list
+ elements: str
+
+ iam_instance_profile_name:
+ description:
+ - The name of the instance profile for the Connector.
+ type: str
+
+ enable_termination_protection:
+ description:
+ - Indicates whether to enable termination protection on the instance.
+ type: bool
+ default: false
+
+ associate_public_ip_address:
+ description:
+ - Indicates whether to associate a public IP address to the instance. If not provided, the association will be done based on the subnet's configuration.
+ type: bool
+ default: true
+
+ account_id:
+ description:
+ - The NetApp tenancy account ID.
+ type: str
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates, a list of certificate file names.
+ type: list
+ elements: str
+ version_added: 21.5.0
+
+ aws_tag:
+ description:
+ - Additional tags for the AWS EC2 instance.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for AWS
+ netapp.cloudmanager.na_cloudmanager_connector_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: bsuhas_ansible_occm
+ region: us-west-1
+ key_name: dev_automation
+ subnet_id: subnet-xxxxx
+ security_group_ids: [sg-xxxxxxxxxxx]
+ iam_instance_profile_name: OCCM_AUTOMATION
+ account_id: "{{ account-xxxxxxx }}"
+ company: NetApp
+ proxy_url: abc.com
+ proxy_user_name: xyz
+ proxy_password: abcxyz
+ proxy_certificates: [abc.crt.txt, xyz.crt.txt]
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: Delete NetApp Cloud Manager connector for AWS
+ netapp.cloudmanager.na_cloudmanager_connector_aws:
+ state: absent
+ name: ansible
+ region: us-west-1
+ account_id: "{{ account-xxxxxxx }}"
+ instance_id: i-xxxxxxxxxxxxx
+ client_id: xxxxxxxxxxxxxxxxxxx
+"""
+
+RETURN = """
+ids:
+ description: Newly created AWS client ID in cloud manager, instance ID and account ID.
+ type: dict
+ returned: success
+"""
+
+import traceback
+import uuid
+import time
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+IMPORT_EXCEPTION = None
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_AWS_LIB = True
+except ImportError as exc:
+ HAS_AWS_LIB = False
+ IMPORT_EXCEPTION = exc
+
+UUID = str(uuid.uuid4())
+
+
+class NetAppCloudManagerConnectorAWS(object):
+ ''' object initialize and class methods '''
+
+ def __init__(self):
+ self.use_rest = False
+ self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ self.argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ instance_type=dict(required=False, type='str', default='t3.xlarge'),
+ key_name=dict(required=False, type='str'),
+ subnet_id=dict(required=False, type='str'),
+ region=dict(required=True, type='str'),
+ instance_id=dict(required=False, type='str'),
+ client_id=dict(required=False, type='str'),
+ ami=dict(required=False, type='str'),
+ company=dict(required=False, type='str'),
+ security_group_ids=dict(required=False, type='list', elements='str'),
+ iam_instance_profile_name=dict(required=False, type='str'),
+ enable_termination_protection=dict(required=False, type='bool', default=False),
+ associate_public_ip_address=dict(required=False, type='bool', default=True),
+ account_id=dict(required=False, type='str'),
+ proxy_url=dict(required=False, type='str'),
+ proxy_user_name=dict(required=False, type='str'),
+ proxy_password=dict(required=False, type='str', no_log=True),
+ proxy_certificates=dict(required=False, type='list', elements='str'),
+ aws_tag=dict(required=False, type='list', elements='dict', options=dict(
+ tag_key=dict(type='str', no_log=False),
+ tag_value=dict(type='str')
+ )),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ['state', 'present', ['company', 'iam_instance_profile_name', 'key_name', 'security_group_ids', 'subnet_id']],
+ ],
+ required_one_of=[['refresh_token', 'sa_client_id']],
+ required_together=[['sa_client_id', 'sa_secret_key']],
+ supports_check_mode=True
+ )
+
+ if HAS_AWS_LIB is False:
+ self.module.fail_json(msg="the python AWS packages boto3 and botocore are required. Command is pip install boto3."
+ "Import error: %s" % str(IMPORT_EXCEPTION))
+
+ self.na_helper = NetAppModule()
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = CloudManagerRestAPI(self.module)
+
+ def get_instance(self):
+ """
+ Get Cloud Manager connector for AWS
+ :return:
+ Dictionary of current details if Cloud Manager connector for AWS
+ None if Cloud Manager connector for AWS is not found
+ """
+
+ response = None
+ client = boto3.client('ec2', region_name=self.parameters['region'])
+ filters = [{'Name': 'tag:Name', 'Values': [self.parameters['name']]},
+ {'Name': 'tag:OCCMInstance', 'Values': ['true']}]
+
+ kwargs = {'Filters': filters} if self.parameters.get('instance_id') is None else {'InstanceIds': [self.parameters['instance_id']]}
+
+ try:
+ response = client.describe_instances(**kwargs)
+
+ except ClientError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ if len(response['Reservations']) == 0:
+ return None
+
+ actives = [instance for reservation in response['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != 'terminated']
+ if len(actives) == 1:
+ return actives[0]
+ if not actives:
+ return None
+ self.module.fail_json(msg="Error: found multiple instances for name=%s: %s" % (self.parameters['name'], str(actives)))
+
+ def get_ami(self):
+ """
+ Get AWS EC2 Image
+ :return:
+ Latest AMI
+ """
+
+ instance_ami = None
+ client = boto3.client('ec2', region_name=self.parameters['region'])
+
+ try:
+ instance_ami = client.describe_images(
+ Filters=[
+ {
+ 'Name': 'name',
+ 'Values': [
+ self.rest_api.environment_data['AMI_FILTER'],
+ ]
+ },
+ ],
+ Owners=[
+ self.rest_api.environment_data['AWS_ACCOUNT'],
+ ],
+ )
+ except ClientError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ latest_date = instance_ami['Images'][0]['CreationDate']
+ latest_ami = instance_ami['Images'][0]['ImageId']
+
+ for image in instance_ami['Images']:
+ if image['CreationDate'] > latest_date:
+ latest_date = image['CreationDate']
+ latest_ami = image['ImageId']
+
+ return latest_ami
+
    def create_instance(self):
        """
        Create Cloud Manager connector for AWS.

        Registers the connector with the Cloud Manager service first (to get
        the EC2 user data and client id), launches the instance, then polls
        until the OCCM agent reports 'active'.
        :return: client_id, instance_id
        """

        # Resolve the AMI only when the user did not pin one explicitly.
        if self.parameters.get('ami') is None:
            self.parameters['ami'] = self.get_ami()

        user_data, client_id = self.register_agent_to_service()

        ec2 = boto3.client('ec2', region_name=self.parameters['region'])

        # These two tags are what get_instance() filters on to find the
        # connector again on subsequent runs.
        tags = [
            {
                'Key': 'Name',
                'Value': self.parameters['name']
            },
            {
                'Key': 'OCCMInstance',
                'Value': 'true'
            },
        ]

        # Append any user-supplied tags (tag_key/tag_value pairs).
        if self.parameters.get('aws_tag') is not None:
            for each_tag in self.parameters['aws_tag']:
                tag = {
                    'Key': each_tag['tag_key'],
                    'Value': each_tag['tag_value']
                }

                tags.append(tag)

        instance_input = {
            # 100 GB encrypted gp2 root volume.
            'BlockDeviceMappings': [
                {
                    'DeviceName': '/dev/sda1',
                    'Ebs': {
                        'Encrypted': True,
                        'VolumeSize': 100,
                        'VolumeType': 'gp2',
                    },
                },
            ],
            'ImageId': self.parameters['ami'],
            'MinCount': 1,
            'MaxCount': 1,
            'KeyName': self.parameters['key_name'],
            'InstanceType': self.parameters['instance_type'],
            'DisableApiTermination': self.parameters['enable_termination_protection'],
            'TagSpecifications': [
                {
                    'ResourceType': 'instance',
                    'Tags': tags
                },
            ],
            'IamInstanceProfile': {
                'Name': self.parameters['iam_instance_profile_name']
            },
            'UserData': user_data
        }

        # When a public IP is requested, subnet and security groups must be
        # specified on the network interface rather than at the top level
        # (the EC2 API rejects mixing the two forms).
        if self.parameters.get('associate_public_ip_address') is True:
            instance_input['NetworkInterfaces'] = [
                {
                    'AssociatePublicIpAddress': self.parameters['associate_public_ip_address'],
                    'DeviceIndex': 0,
                    'SubnetId': self.parameters['subnet_id'],
                    'Groups': self.parameters['security_group_ids']
                }
            ]
        else:
            instance_input['SubnetId'] = self.parameters['subnet_id']
            instance_input['SecurityGroupIds'] = self.parameters['security_group_ids']

        try:
            result = ec2.run_instances(**instance_input)
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # Sleep for 2 minutes to give the instance time to boot before polling.
        time.sleep(120)
        # Poll up to 16 times, 30s apart (~8 more minutes) for the agent to go active.
        retries = 16
        while retries > 0:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
            if error is not None:
                # Report ids so the caller can clean up the half-created connector.
                self.module.fail_json(
                    msg="Error: not able to get occm status: %s, %s" % (str(error), str(agent)),
                    client_id=client_id, instance_id=result['Instances'][0]['InstanceId'])
            if agent['status'] == "active":
                break
            else:
                time.sleep(30)
                retries -= 1
        if retries == 0:
            # Taking too long for status to be active
            return self.module.fail_json(msg="Error: taking too long for OCCM agent to be active or not properly setup")

        return client_id, result['Instances'][0]['InstanceId']
+
+ def get_vpc(self):
+ """
+ Get vpc
+ :return: vpc ID
+ """
+
+ vpc_result = None
+ ec2 = boto3.client('ec2', region_name=self.parameters['region'])
+
+ vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}
+
+ try:
+ vpc_result = ec2.describe_subnets(**vpc_input)
+ except ClientError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ return vpc_result['Subnets'][0]['VpcId']
+
+ def set_account_id(self):
+ if self.parameters.get('account_id') is None:
+ response, error = self.na_helper.get_or_create_account(self.rest_api)
+ if error is not None:
+ return error
+ self.parameters['account_id'] = response
+ return None
+
    def register_agent_to_service(self):
        """
        Register agent to service and collect userdata by setting up connector.

        Posts the connector registration to /agents-mgmt/connector-setup and
        builds the EC2 user-data blob containing the returned client
        credentials and proxy settings.
        :return: UserData, ClientID
        """

        vpc = self.get_vpc()

        if self.parameters.get('account_id') is None:
            error = self.set_account_id()
            if error is not None:
                self.module.fail_json(msg="Error: failed to get account: %s." % str(error))

        headers = {
            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
            # NOTE(review): fixed placeholder request id — presumably any value is
            # accepted by the service; confirm against the API contract.
            "X-Service-Request-Id": "111"
        }
        # Registration payload: where the connector will live in AWS.
        body = {
            "accountId": self.parameters['account_id'],
            "name": self.parameters['name'],
            "company": self.parameters['company'],
            "placement": {
                "provider": "AWS",
                "region": self.parameters['region'],
                "network": vpc,
                "subnet": self.parameters['subnet_id'],
            },
            "extra": {
                "proxy": {
                    "proxyUrl": self.parameters.get('proxy_url'),
                    "proxyUserName": self.parameters.get('proxy_user_name'),
                    "proxyPassword": self.parameters.get('proxy_password')
                }
            }
        }

        register_api = '/agents-mgmt/connector-setup'
        response, error, dummy = self.rest_api.post(register_api, body, header=headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on connector setup: %s, %s" % (str(error), str(response)))
        client_id = response['clientId']
        client_secret = response['clientSecret']

        # This dict becomes the EC2 user data consumed by the connector image.
        u_data = {
            'instanceName': self.parameters['name'],
            'company': self.parameters['company'],
            'clientId': client_id,
            'clientSecret': client_secret,
            'systemId': UUID,
            'tenancyAccountId': self.parameters['account_id'],
            'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
                              'proxyUserName': self.parameters.get('proxy_user_name'),
                              'proxyUrl': self.parameters.get('proxy_url'),
                              },
            'localAgent': True
        }

        # Proxy CA certificates are read from disk and encoded by the helper;
        # any unreadable file aborts the module.
        if self.parameters.get('proxy_certificates') is not None:
            proxy_certificates = []
            for certificate_file in self.parameters['proxy_certificates']:
                encoded_certificate, error = self.na_helper.encode_certificates(certificate_file)
                if error:
                    self.module.fail_json(msg="Error: could not open/read file '%s' of proxy_certificates: %s" % (certificate_file, error))
                proxy_certificates.append(encoded_certificate)

            if proxy_certificates:
                u_data['proxySettings']['proxyCertificates'] = proxy_certificates

        user_data = self.na_helper.convert_data_to_tabbed_jsonstring(u_data)

        return user_data, client_id
+
    def delete_instance(self):
        """
        Delete OCCM instance.

        Terminates the EC2 instance, then (when a client_id is known) waits
        for the OCCM agent to leave the 'active' state.
        :return:
            None on success, or an error string the caller should report.
        """

        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
        try:
            ec2.terminate_instances(
                InstanceIds=[
                    self.parameters['instance_id'],
                ],
            )
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # Without a client_id there is no agent to wait on.
        if 'client_id' not in self.parameters:
            return None

        # Poll up to 30 times, 10s apart (~5 minutes), for the agent to stop
        # reporting 'active'.
        retries = 30
        while retries > 0:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
            if error is not None:
                return "Error: not able to get occm agent status after deleting instance: %s, %s." % (str(error), str(agent))
            if agent['status'] != "active":
                break
            else:
                time.sleep(10)
                retries -= 1
        if retries == 0:
            # Taking too long for terminating OCCM
            return "Error: taking too long for instance to finish terminating."
        return None
+
    def get_occm_agents(self):
        """Collect the OCCM agents associated with this connector.

        Looks up by client_id when given; otherwise resolves the account and
        searches agents by connector name for provider 'AWS'.  Fails the
        module on lookup errors, except a 403 'Action not allowed for user'
        on a client_id lookup, which is treated as "agent already gone".
        :return: a (possibly empty) list of agent records.
        """
        if 'client_id' in self.parameters:
            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
            if str(error) == '403' and 'Action not allowed for user' in str(agent):
                # assume the agent does not exist anymore
                agents, error = [], None
                self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
            else:
                agents = [agent]
        else:
            # Best effort: try to derive the account id first.
            self.set_account_id()
            if 'account_id' in self.parameters:
                agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
                                                                       self.parameters['name'], 'AWS')
            else:
                self.module.warn('Without account_id, some agents may still exist.')
                agents, error = [], None
        if error:
            self.module.fail_json(
                msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
        return agents
+
+ def set_client_id(self):
+ agents = self.get_occm_agents()
+ client_id = self.parameters.get('client_id')
+ if client_id is None:
+ active_client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent and agent['status'] == 'active']
+ if len(active_client_ids) == 1:
+ client_id = active_client_ids[0]
+ self.parameters['client_id'] = client_id
+ return client_id, agents
+
+ def delete_occm_agents(self, agents):
+ error = self.na_helper.delete_occm_agents(self.rest_api, agents)
+ if error:
+ return "Error: deleting OCCM agent(s): %s" % error
+ return None
+
    def apply(self):
        """
        Apply action to the Cloud Manager connector for AWS.

        Exits the module with 'changed', the instance/client/account ids,
        and a note when modify was requested but is unsupported.
        :return: None
        """
        results = {
            'account_id': None,
            'client_id': None,
            'instance_id': None
        }
        agents = None
        current = self.get_instance()
        # Resolve ids when the connector exists, or when deleting (the agents
        # may outlive the VM and still need cleanup).
        if current or self.parameters['state'] == 'absent':
            if self.parameters.get('instance_id') is None and current:
                self.parameters['instance_id'] = current['InstanceId']
            results['instance_id'] = self.parameters.get('instance_id')
            results['client_id'], agents = self.set_client_id()
            if current is None and agents:
                # it's possible the VM instance does not exist, but the clients are still present.
                current = agents

        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            results['modify'] = 'Note: modifying an existing connector is not supported at this time.'

        if not self.module.check_mode and self.na_helper.changed:
            if cd_action == 'create':
                results['client_id'], results['instance_id'] = self.create_instance()
            elif cd_action == 'delete':
                # Attempt both deletions and aggregate failures so one error
                # does not mask the other.
                errors = []
                if self.parameters.get('instance_id'):
                    errors.append(self.delete_instance())
                if agents:
                    errors.append(self.delete_occm_agents(agents))
                errors = [error for error in errors if error]
                if errors:
                    self.module.fail_json(msg='Errors deleting instance or client: %s' % ', '.join(errors))

        results['account_id'] = self.parameters.get('account_id')
        results['changed'] = self.na_helper.changed
        self.module.exit_json(**results)
+
+
def main():
    """
    Create Cloud Manager connector for AWS class instance and invoke apply.
    :return: None
    """
    connector = NetAppCloudManagerConnectorAWS()
    connector.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py
new file mode 100644
index 000000000..6f1d30a32
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_azure
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_azure
+short_description: NetApp Cloud Manager connector for Azure.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete Cloud Manager connector for Azure.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for Azure should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for Azure to manage.
+ type: str
+
+ virtual_machine_size:
+ description:
+ - The virtual machine type. (for example, Standard_DS3_v2).
+ - At least 4 CPU and 16 GB of memory are required.
+ type: str
+ default: Standard_DS3_v2
+
+ resource_group:
+ required: true
+ description:
+ - The resource group in Azure where the resources will be created.
+ type: str
+
+ subnet_name:
+ required: true
+ description:
+ - The name of the subnet for the virtual machine.
+ - For example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/xxx/subnets/default,
+ only default is needed.
+ aliases:
+ - subnet_id
+ type: str
+ version_added: '21.7.0'
+
+ location:
+ required: true
+ description:
+ - The location where the Cloud Manager Connector will be created.
+ type: str
+
+ client_id:
+ description:
+ - The unique client ID of the Connector.
+ - The connector ID.
+ type: str
+
+ subscription_id:
+ required: true
+ description:
+ - The ID of the Azure subscription.
+ type: str
+
+ company:
+ required: true
+ description:
+ - The name of the company of the user.
+ type: str
+
+ vnet_name:
+ required: true
+ description:
+ - The name of the virtual network.
+ - for example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/default,
+ only default is needed.
+ aliases:
+ - vnet_id
+ type: str
+ version_added: '21.7.0'
+
+ vnet_resource_group:
+ description:
+ - The resource group in Azure associated with the virtual network.
+      - If not provided, it is assumed that the VNet is within the previously specified resource group.
+ type: str
+
+ network_security_resource_group:
+ description:
+ - The resource group in Azure associated with the security group.
+      - If not provided, it is assumed that the security group is within the previously specified resource group.
+ type: str
+
+ network_security_group_name:
+ required: true
+ description:
+ - The name of the security group for the deployment.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates, a list of certificate file names.
+ type: list
+ elements: str
+
+ associate_public_ip_address:
+ description:
+ - Indicates whether to associate the public IP address to the virtual machine.
+ type: bool
+ default: true
+
+ account_id:
+ required: true
+ description:
+ - The NetApp tenancy account ID.
+ type: str
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ admin_username:
+ required: true
+ description:
+ - The user name for the Connector.
+ type: str
+
+ admin_password:
+ required: true
+ description:
+ - The password for the Connector.
+ type: str
+
+ storage_account:
+ description:
+ - The storage account can be created automatically.
+ - When C(storage_account) is not set, the name is constructed by appending 'sa' to the connector C(name).
+ - Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+ type: str
+ version_added: '21.17.0'
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for Azure.
+ netapp.cloudmanager.na_cloudmanager_connector_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: bsuhas_ansible_occm
+ location: westus
+ resource_group: occm_group_westus
+ subnet_name: subnetxxxxx
+ vnet_name: Vnetxxxxx
+ subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
+ account_id: "{{ account-xxxxxxx }}"
+ company: NetApp
+ admin_password: Netapp123456
+ admin_username: bsuhas
+ network_security_group_name: OCCM_SG
+ proxy_url: abc.com
+ proxy_user_name: xyz
+ proxy_password: abcxyz
+ proxy_certificates: [abc.crt.txt, xyz.crt.txt]
+
+- name: Delete NetApp Cloud Manager connector for Azure.
+ netapp.cloudmanager.na_cloudmanager_connector_azure:
+ state: absent
+ name: ansible
+ location: westus
+ resource_group: occm_group_westus
+ network_security_group_name: OCCM_SG
+ subnet_name: subnetxxxxx
+ company: NetApp
+ admin_password: Netapp123456
+ admin_username: bsuhas
+ vnet_name: Vnetxxxxx
+ subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
+ account_id: "{{ account-xxxxxxx }}"
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ client_id: xxxxxxxxxxxxxxxxxxx
+"""
+
+RETURN = """
+msg:
+ description: Newly created Azure connector id in cloud manager.
+ type: str
+ returned: success
+ sample: 'xxxxxxxxxxxxxxxx'
+"""
+
+import traceback
+import time
+import base64
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+IMPORT_EXCEPTION = None
+
+try:
+ from azure.mgmt.resource import ResourceManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.storage import StorageManagementClient
+ from azure.mgmt.resource.resources.models import Deployment
+ from azure.common.client_factory import get_client_from_cli_profile
+ from msrestazure.azure_exceptions import CloudError
+ HAS_AZURE_LIB = True
+except ImportError as exc:
+ HAS_AZURE_LIB = False
+ IMPORT_EXCEPTION = exc
+
+
+class NetAppCloudManagerConnectorAzure(object):
+ ''' object initialize and class methods '''
+
    def __init__(self):
        """Build the argument spec, validate module inputs, and initialize REST helpers.

        Fails early with a clear message when the Azure SDK packages are
        missing.  Defaults the storage account name to '<name>sa' (lowercase)
        when not supplied.
        """
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            virtual_machine_size=dict(required=False, type='str', default='Standard_DS3_v2'),
            resource_group=dict(required=True, type='str'),
            subscription_id=dict(required=True, type='str'),
            subnet_name=dict(required=True, type='str', aliases=['subnet_id']),
            vnet_name=dict(required=True, type='str', aliases=['vnet_id']),
            vnet_resource_group=dict(required=False, type='str'),
            location=dict(required=True, type='str'),
            network_security_resource_group=dict(required=False, type='str'),
            network_security_group_name=dict(required=True, type='str'),
            client_id=dict(required=False, type='str'),
            company=dict(required=True, type='str'),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            associate_public_ip_address=dict(required=False, type='bool', default=True),
            account_id=dict(required=True, type='str'),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            storage_account=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                # client_id is needed to find the agent when deleting.
                ['state', 'absent', ['client_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )

        if HAS_AZURE_LIB is False:
            self.module.fail_json(msg="the python AZURE library azure.mgmt and azure.common is required. Command is pip install azure-mgmt, azure-common."
                                  " Import error: %s" % str(IMPORT_EXCEPTION))

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Derive a default storage account name from the connector name.
        if 'storage_account' not in self.parameters or self.parameters['storage_account'] == "":
            self.parameters['storage_account'] = self.parameters['name'].lower() + 'sa'
        self.rest_api = CloudManagerRestAPI(self.module)
+
+ def get_deploy_azure_vm(self):
+ """
+ Get Cloud Manager connector for AZURE
+ :return:
+ Dictionary of current details if Cloud Manager connector for AZURE
+ None if Cloud Manager connector for AZURE is not found
+ """
+
+ exists = False
+
+ resource_client = get_client_from_cli_profile(ResourceManagementClient)
+ try:
+ exists = resource_client.deployments.check_existence(self.parameters['resource_group'], self.parameters['name'])
+
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ if not exists:
+ return None
+
+ return exists
+
    def deploy_azure(self):
        """
        Create Cloud Manager connector for Azure.

        Registers the connector with the Cloud Manager service, fills in the
        ARM template parameters, deploys them into the resource group, then
        polls until the OCCM agent reports 'active' and fetches the VM's
        managed-identity principal id.
        :return: client_id, principal_id
        """

        user_data, client_id = self.register_agent_to_service()
        # ARM template and its parameter skeleton come from the helper module.
        template = json.loads(self.na_helper.call_template())
        params = json.loads(self.na_helper.call_parameters())
        params['adminUsername']['value'] = self.parameters['admin_username']
        params['adminPassword']['value'] = self.parameters['admin_password']
        params['customData']['value'] = json.dumps(user_data)
        params['location']['value'] = self.parameters['location']
        params['virtualMachineName']['value'] = self.parameters['name']
        params['storageAccount']['value'] = self.parameters['storage_account']
        if self.rest_api.environment == 'stage':
            params['environment']['value'] = self.rest_api.environment
        # Accept either a full resource id or a bare VNet name; build the id
        # from the (vnet_)resource_group when only a name was given.
        if '/subscriptions' in self.parameters['vnet_name']:
            network = self.parameters['vnet_name']
        else:
            if self.parameters.get('vnet_resource_group') is not None:
                network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
                    self.parameters['subscription_id'], self.parameters['vnet_resource_group'], self.parameters['vnet_name'])
            else:
                network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
                    self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['vnet_name'])

        # Same convention for the subnet: full id or name relative to the VNet.
        if '/subscriptions' in self.parameters['subnet_name']:
            subnet = self.parameters['subnet_name']
        else:
            subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name'])

        # Security group id, optionally in its own resource group.
        if self.parameters.get('network_security_resource_group') is not None:
            network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % (
                self.parameters['subscription_id'], self.parameters['network_security_resource_group'], self.parameters['network_security_group_name'])
        else:
            network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % (
                self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['network_security_group_name'])

        params['virtualNetworkId']['value'] = network
        params['networkSecurityGroupName']['value'] = network_security_group_name
        params['virtualMachineSize']['value'] = self.parameters['virtual_machine_size']
        params['subnetId']['value'] = subnet

        try:
            resource_client = get_client_from_cli_profile(ResourceManagementClient)

            # create_or_update is idempotent on an existing resource group.
            resource_client.resource_groups.create_or_update(
                self.parameters['resource_group'],
                {"location": self.parameters['location']})

            deployment_properties = {
                'mode': 'Incremental',
                'template': template,
                'parameters': params
            }
            resource_client.deployments.begin_create_or_update(
                self.parameters['resource_group'],
                self.parameters['name'],
                Deployment(properties=deployment_properties)
            )

        except CloudError as error:
            self.module.fail_json(msg="Error in deploy_azure: %s" % to_native(error), exception=traceback.format_exc())

        # Sleep for 2 minutes to give the deployment time to boot the VM.
        time.sleep(120)
        # Poll up to 30 times, 30s apart (~15 more minutes) for the agent.
        retries = 30
        while retries > 0:
            occm_resp, error = self.na_helper.check_occm_status(self.rest_api, client_id)
            if error is not None:
                self.module.fail_json(
                    msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
            if occm_resp['agent']['status'] == "active":
                break
            else:
                time.sleep(30)
                retries -= 1
        if retries == 0:
            # Taking too long for status to be active
            return self.module.fail_json(msg="Taking too long for OCCM agent to be active or not properly setup")

        # Fetch the VM to expose its managed identity principal to the caller.
        try:
            compute_client = get_client_from_cli_profile(ComputeManagementClient)
            vm = compute_client.virtual_machines.get(self.parameters['resource_group'], self.parameters['name'])
        except CloudError as error:
            return self.module.fail_json(msg="Error in deploy_azure (get identity): %s" % to_native(error), exception=traceback.format_exc())

        principal_id = vm.identity.principal_id
        return client_id, principal_id
+
+ def register_agent_to_service(self):
+ """
+ Register agent to service and collect userdata by setting up connector
+ :return: UserData, ClientID
+ """
+
+ if '/subscriptions' in self.parameters['vnet_name']:
+ network = self.parameters['vnet_name']
+ else:
+ if self.parameters.get('vnet_resource_group') is not None:
+ network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
+ self.parameters['subscription_id'], self.parameters['vnet_resource_group'], self.parameters['vnet_name'])
+ else:
+ network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
+ self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['vnet_name'])
+
+ if '/subscriptions' in self.parameters['subnet_name']:
+ subnet = self.parameters['subnet_name']
+ else:
+ subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name'])
+
+ if self.parameters.get('account_id') is None:
+ response, error = self.na_helper.get_or_create_account(self.rest_api)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
+ self.parameters['account_id'] = response
+
+ headers = {
+ "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
+ }
+ body = {
+ "accountId": self.parameters['account_id'],
+ "name": self.parameters['name'],
+ "company": self.parameters['company'],
+ "placement": {
+ "provider": "AZURE",
+ "region": self.parameters['location'],
+ "network": network,
+ "subnet": subnet,
+ },
+ "extra": {
+ "proxy": {
+ "proxyUrl": self.parameters.get('proxy_url'),
+ "proxyUserName": self.parameters.get('proxy_user_name'),
+ "proxyPassword": self.parameters.get('proxy_password')
+ }
+ }
+ }
+
+ register_url = "%s/agents-mgmt/connector-setup" % self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+ response, error, dummy = self.rest_api.post(register_url, body, header=headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on getting userdata for connector setup: %s, %s" % (str(error), str(response)))
+ client_id = response['clientId']
+
+ proxy_certificates = []
+ if self.parameters.get('proxy_certificates') is not None:
+ for each in self.parameters['proxy_certificates']:
+ try:
+ data = open(each, "r").read()
+ except OSError:
+ self.module.fail_json(msg="Error: Could not open/read file of proxy_certificates: %s" % str(each))
+
+ encoded_certificate = base64.b64encode(data)
+ proxy_certificates.append(encoded_certificate)
+
+ if proxy_certificates:
+ response['proxySettings']['proxyCertificates'] = proxy_certificates
+
+ return response, client_id
+
+ def delete_azure_occm(self):
+ """
+ Delete OCCM
+ :return:
+ None
+ """
+ # delete vm deploy
+ try:
+ compute_client = get_client_from_cli_profile(ComputeManagementClient)
+ vm_delete = compute_client.virtual_machines.begin_delete(
+ self.parameters['resource_group'],
+ self.parameters['name'])
+ while not vm_delete.done():
+ vm_delete.wait(2)
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ # delete interfaces deploy
+ try:
+ network_client = get_client_from_cli_profile(NetworkManagementClient)
+ interface_delete = network_client.network_interfaces.begin_delete(
+ self.parameters['resource_group'],
+ self.parameters['name'] + '-nic')
+ while not interface_delete.done():
+ interface_delete.wait(2)
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ # delete storage account deploy
+ try:
+ storage_client = get_client_from_cli_profile(StorageManagementClient)
+ storage_client.storage_accounts.delete(
+ self.parameters['resource_group'],
+ self.parameters['storage_account'])
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ # delete storage account deploy
+ try:
+ network_client = get_client_from_cli_profile(NetworkManagementClient)
+ public_ip_addresses_delete = network_client.public_ip_addresses.begin_delete(
+ self.parameters['resource_group'],
+ self.parameters['name'] + '-ip')
+ while not public_ip_addresses_delete.done():
+ public_ip_addresses_delete.wait(2)
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ # delete deployment
+ try:
+ resource_client = get_client_from_cli_profile(ResourceManagementClient)
+ deployments_delete = resource_client.deployments.begin_delete(
+ self.parameters['resource_group'],
+ self.parameters['name'] + '-ip')
+ while not deployments_delete.done():
+ deployments_delete.wait(5)
+ except CloudError as error:
+ self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+ retries = 16
+ while retries > 0:
+ occm_resp, error = self.na_helper.check_occm_status(self.rest_api,
+ self.parameters['client_id'])
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
+ if occm_resp['agent']['status'] != "active":
+ break
+ else:
+ time.sleep(10)
+ retries -= 1
+ if retries == 0:
+ # Taking too long for terminating OCCM
+ return self.module.fail_json(msg="Taking too long for instance to finish terminating")
+ client = self.rest_api.format_client_id(self.parameters['client_id'])
+ error = self.na_helper.delete_occm_agents(self.rest_api, [{'agentId': client}])
+ if error:
+ self.module.fail_json(msg="Error: unexpected response on deleting OCCM: %s" % (str(error)))
+
+    def apply(self):
+        """
+        Apply action to the Cloud Manager connector for AZURE
+        :return: None
+        """
+        client_id = None
+        principal_id = None
+        if not self.module.check_mode:
+            if self.parameters['state'] == 'present':
+                # NOTE(review): no existence check here -- state=present always deploys
+                # a new connector VM; confirm this non-idempotent behavior is intended.
+                client_id, principal_id = self.deploy_azure()
+                self.na_helper.changed = True
+            elif self.parameters['state'] == 'absent':
+                # delete only when the azure deployment actually exists
+                get_deploy = self.get_deploy_azure_vm()
+                if get_deploy:
+                    self.delete_azure_occm()
+                    self.na_helper.changed = True
+
+        self.module.exit_json(changed=self.na_helper.changed, msg={'client_id': client_id, 'principal_id': principal_id})
+
+
+def main():
+ """
+ Create Cloud Manager connector for AZURE class instance and invoke apply
+ :return: None
+ """
+ obj_store = NetAppCloudManagerConnectorAzure()
+ obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py
new file mode 100644
index 000000000..bea686f4c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_gcp
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_gcp
+short_description: NetApp Cloud Manager connector for GCP.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete Cloud Manager connector for GCP.
+
+options:
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for GCP should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for GCP to manage.
+ type: str
+
+ project_id:
+ description:
+ - The GCP project_id where the connector will be created.
+ required: true
+ type: str
+
+ zone:
+ description:
+ - The GCP zone where the Connector will be created.
+ required: true
+ type: str
+
+ gcp_service_account_email:
+ description:
+ - The email of the service_account for the connector instance. This service account is used to allow the Connector to create Cloud Volume ONTAP.
+ required: true
+ type: str
+ aliases: ['service_account_email']
+ version_added: 21.7.0
+
+ company:
+ description:
+ - The name of the company of the user.
+ required: true
+ type: str
+
+ gcp_service_account_path:
+ description:
+ - The local path of the service_account JSON file for GCP authorization purposes. This service account is used to create the Connector in GCP.
+ type: str
+ aliases: ['service_account_path']
+ version_added: 21.7.0
+
+ subnet_id:
+ description:
+ - The name of the subnet for the virtual machine.
+ type: str
+ default: default
+
+ network_project_id:
+ description:
+ - The project id in GCP associated with the Subnet. If not provided, it is assumed that the Subnet is within the previously specified project id.
+ type: str
+
+ machine_type:
+ description:
+ - The machine_type for the Connector VM.
+ type: str
+ default: n2-standard-4
+
+ firewall_tags:
+ description:
+ - Indicates whether to add firewall_tags to the connector VM (HTTP and HTTPS).
+ type: bool
+ default: true
+
+ associate_public_ip:
+ description:
+ - Indicates whether to associate a public IP address to the virtual machine.
+ type: bool
+ default: true
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates. A list of certificate file names.
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with.
+ - If not provided, Cloud Manager uses the first account. If no account exists, Cloud Manager creates a new account.
+ - You can find the account ID in the account tab of Cloud Manager at [https://cloudmanager.netapp.com](https://cloudmanager.netapp.com).
+ type: str
+
+ client_id:
+ description:
+ - The client ID of the Cloud Manager Connector.
+ - The connector ID.
+ - If state is absent, the client id is used to identify the agent and delete it.
+ - If state is absent and this parameter is not set, all agents associated with C(name) are deleted.
+ - Ignored when state is present.
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for GCP
+ netapp.cloudmanager.na_cloudmanager_connector_gcp:
+ state: present
+ name: ansible-occm-gcp
+ project_id: xxxxxxx-support
+ zone: us-east4-b
+ company: NetApp
+ gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
+ gcp_service_account_path: gcp_creds.json
+ proxy_user_name: test
+ proxy_password: test
+ proxy_url: http://abcdefg.com
+ proxy_certificates: ["D-TRUST_Root_Class_3_CA_2_2009.crt", "DigiCertGlobalRootCA.crt", "DigiCertGlobalRootG2.crt"]
+ account_id: account-xxxxXXXX
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+
+- name: Delete NetApp Cloud Manager connector for GCP
+ netapp.cloudmanager.na_cloudmanager_connector_gcp:
+ state: absent
+ name: ansible-occm-gcp
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ client_id: "{{ wwwwwwwwww }}"
+ project_id: xxxxxxx-support
+ zone: us-east4-b
+ company: NetApp
+ gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
+ gcp_service_account_path: gcp_creds.json
+ account_id: account-xxxxXXXX
+"""
+
+RETURN = """
+client_id:
+ description: Newly created GCP connector id on cloud manager.
+ type: str
+ returned: success
+ sample: 'FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW'
+client_ids:
+ description:
+ - a list of client ids matching the name and provider if the connector already exists.
+ - ideally the list should be empty, or contain a single element matching client_id.
+ type: list
+ elements: str
+ returned: success
+ sample: ['FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW']
+"""
+import uuid
+import time
+import base64
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+IMPORT_ERRORS = []
+HAS_GCP_COLLECTION = False
+
+try:
+ import google.auth
+ from google.auth.transport import requests
+ from google.oauth2 import service_account
+ import yaml
+ HAS_GCP_COLLECTION = True
+except ImportError as exc:
+ IMPORT_ERRORS.append(str(exc))
+
+GCP_DEPLOYMENT_MANAGER = "www.googleapis.com"
+UUID = str(uuid.uuid4())
+
+
+class NetAppCloudManagerConnectorGCP(object):
+ ''' object initialize and class methods '''
+
+    def __init__(self):
+        # build the module argument spec: common cloudmanager options plus GCP specific ones
+        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            project_id=dict(required=True, type='str'),
+            zone=dict(required=True, type='str'),
+            company=dict(required=True, type='str'),
+            gcp_service_account_email=dict(required=True, type='str', aliases=['service_account_email']),
+            gcp_service_account_path=dict(required=False, type='str', aliases=['service_account_path']),
+            subnet_id=dict(required=False, type='str', default='default'),
+            network_project_id=dict(required=False, type='str'),
+            machine_type=dict(required=False, type='str', default='n2-standard-4'),
+            firewall_tags=dict(required=False, type='bool', default=True),
+            associate_public_ip=dict(required=False, type='bool', default=True),
+            proxy_url=dict(required=False, type='str'),
+            proxy_user_name=dict(required=False, type='str'),
+            proxy_password=dict(required=False, type='str', no_log=True),
+            proxy_certificates=dict(required=False, type='list', elements='str'),
+            account_id=dict(required=False, type='str'),
+            client_id=dict(required=False, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_one_of=[['refresh_token', 'sa_client_id']],
+            required_together=[['sa_client_id', 'sa_secret_key']],
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        self.rest_api = CloudManagerRestAPI(self.module)
+        # deployment manager names are always "<name><suffix>"
+        self.gcp_common_suffix_name = "-vm-boot-deployment"
+        # fail early with a clear message if google-auth (or yaml) could not be imported
+        self.fail_when_import_errors(IMPORT_ERRORS, HAS_GCP_COLLECTION)
+        super(NetAppCloudManagerConnectorGCP, self).__init__()
+
+        # fetch the GCP access token once; reused for every deployment manager call
+        self.rest_api.gcp_token, error = self.get_gcp_token()
+        if error:
+            self.module.fail_json(msg='Error getting gcp token: %s' % repr(error))
+
+ def get_gcp_token(self):
+ '''
+ get gcp token from gcp service account credential json file
+ '''
+ scopes = ["https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/ndev.cloudman",
+ "https://www.googleapis.com/auth/ndev.cloudman.readonly",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"]
+ if 'gcp_service_account_path' in self.parameters:
+ try:
+ fh = open(self.parameters['gcp_service_account_path'])
+ except (OSError, IOError) as error:
+ return None, "opening %s: got: %s" % (self.parameters['gcp_service_account_path'], repr(error))
+ with fh:
+ key_bytes = json.load(fh)
+ if key_bytes is None:
+ return None, "Error: gcp_service_account_path file is empty"
+ credentials = service_account.Credentials.from_service_account_file(self.parameters['gcp_service_account_path'], scopes=scopes)
+ else:
+ credentials, project = google.auth.default(scopes=scopes)
+
+ credentials.refresh(requests.Request())
+
+ return credentials.token, None
+
+ def fail_when_import_errors(self, import_errors, has_gcp_collection=True):
+ if has_gcp_collection and not import_errors:
+ return
+ msg = ''
+ if not has_gcp_collection:
+ msg = 'The python google-auth package is required. '
+ msg += 'Import errors: %s' % str(import_errors)
+ self.module.fail_json(msg=msg)
+
+    def get_deploy_vm(self):
+        '''
+        Get Cloud Manager connector for GCP
+        :return:
+            Dictionary of current details if Cloud Manager connector for GCP
+            None if Cloud Manager connector for GCP is not found
+        '''
+        # the deployment name is the connector name plus the fixed suffix
+        api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
+            self.parameters['project_id'], self.parameters['name'], self.gcp_common_suffix_name)
+        headers = {
+            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
+            'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
+        }
+
+        occm_status, error, dummy = self.rest_api.get(api_url, header=headers)
+        if error is not None:
+            # a 404 with 'is not found' in the payload means the deployment does not exist
+            # NOTE(review): assumes occm_status is a bytes payload on error -- confirm rest_api behavior
+            if error == '404' and b'is not found' in occm_status:
+                return None
+            self.module.fail_json(
+                msg="Error: unexpected response on getting occm: %s, %s" % (str(error), str(occm_status)))
+
+        return occm_status
+
+    def get_custom_data_for_gcp(self, proxy_certificates):
+        '''
+        Build the user data JSON embedded in the connector VM metadata.
+        :param proxy_certificates: list of base64 encoded proxy certificates
+        :return: (user_data JSON string, client_id, None) -- failures call fail_json directly
+        '''
+        # get account ID, creating a new account if the tenant has none yet
+        if 'account_id' not in self.parameters:
+            # get account ID
+            response, error = self.na_helper.get_or_create_account(self.rest_api)
+            if error is not None:
+                self.module.fail_json(
+                    msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
+            self.parameters['account_id'] = response
+        # registerAgentTOServiceForGCP: obtain client credentials for the new agent
+        response, error = self.na_helper.register_agent_to_service(self.rest_api, "GCP", "")
+        if error is not None:
+            self.module.fail_json(
+                msg="Error: register agent to service for gcp failed: %s, %s" % (str(error), str(response)))
+        # add proxy_certificates as part of json data
+        client_id = response['clientId']
+        client_secret = response['clientSecret']
+        u_data = {
+            'instanceName': self.parameters['name'],
+            'company': self.parameters['company'],
+            'clientId': client_id,
+            'clientSecret': client_secret,
+            'systemId': UUID,
+            'tenancyAccountId': self.parameters['account_id'],
+            'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
+                              'proxyUserName': self.parameters.get('proxy_user_name'),
+                              'proxyUrl': self.parameters.get('proxy_url'),
+                              'proxyCertificates': proxy_certificates,
+                              },
+        }
+        # convert response to json format
+        user_data = json.dumps(u_data)
+        return user_data, client_id, None
+
+    def deploy_gcp_vm(self, proxy_certificates):
+        '''
+        deploy GCP VM
+        Builds the deployment manager template (connector VM + boot disk), posts it,
+        then polls Cloud Manager until the OCCM agent reports active.
+        :param proxy_certificates: list of base64 encoded proxy certificates
+        :return: (response, client_id, error)
+        '''
+        # getCustomDataForGCP
+        response, client_id, error = self.get_custom_data_for_gcp(proxy_certificates)
+        if error is not None:
+            self.module.fail_json(
+                msg="Error: Not able to get user data for GCP: %s, %s" % (str(error), str(response)))
+        # compose: the user data JSON is embedded base64 encoded in the VM metadata
+        user_data = response
+        gcp_custom_data = base64.b64encode(user_data.encode())
+        gcp_sa_scopes = ["https://www.googleapis.com/auth/cloud-platform",
+                         "https://www.googleapis.com/auth/compute",
+                         "https://www.googleapis.com/auth/compute.readonly",
+                         "https://www.googleapis.com/auth/ndev.cloudman",
+                         "https://www.googleapis.com/auth/ndev.cloudman.readonly"]
+
+        tags = []
+        if self.parameters['firewall_tags'] is True:
+            tags = {'items': ['firewall-tag-bvsu', 'http-server', 'https-server']}
+
+        # first resource: the connector VM itself
+        device_name = self.parameters['name'] + '-vm-disk-boot'
+        t = {
+            'name': self.parameters['name'] + '-vm',
+            'properties': {
+                'disks': [
+                    {'autoDelete': True,
+                     'boot': True,
+                     'deviceName': device_name,
+                     'name': device_name,
+                     # reference to the disk resource defined below
+                     'source': "\\\"$(ref.%s.selfLink)\\\"" % device_name,
+                     'type': "PERSISTENT",
+                     },
+                ],
+                'machineType': "zones/%s/machineTypes/%s" % (self.parameters['zone'], self.parameters['machine_type']),
+                'metadata': {
+                    'items': [
+                        {'key': 'serial-port-enable',
+                         'value': 1},
+                        {'key': 'customData',
+                         'value': gcp_custom_data}
+                    ]
+                },
+                'serviceAccounts': [{'email': self.parameters['gcp_service_account_email'],
+                                     'scopes': gcp_sa_scopes, }],
+                'tags': tags,
+                'zone': self.parameters['zone']
+            },
+            'metadata': {'dependsOn': [device_name]},
+            'type': 'compute.v1.instance',
+        }
+
+        access_configs = []
+        if self.parameters['associate_public_ip'] is True:
+            access_configs = [{'kind': 'compute#accessConfig',
+                               'name': 'External NAT',
+                               'type': 'ONE_TO_ONE_NAT',
+                               'networkTier': 'PREMIUM'
+                               }]
+        # the subnet may live in a shared VPC host project (network_project_id)
+        project_id = self.parameters['project_id']
+        if self.parameters.get('network_project_id'):
+            project_id = self.parameters['network_project_id']
+
+        t['properties']['networkInterfaces'] = [
+            {'accessConfigs': access_configs,
+             'kind': 'compute#networkInterface',
+             'subnetwork': 'projects/%s/regions/%s/subnetworks/%s' % (
+                 project_id, self.parameters['region'], self.parameters['subnet_id'])
+             }]
+
+        # second resource: the 100GB boot disk built from the Cloud Manager image family
+        td = {
+            'name': device_name,
+            'properties': {'name': device_name,
+                           'sizeGb': 100,
+                           'sourceImage': 'projects/%s/global/images/family/%s' % (self.rest_api.environment_data['GCP_IMAGE_PROJECT'],
+                                                                                   self.rest_api.environment_data['GCP_IMAGE_FAMILY']),
+                           'type': 'zones/%s/diskTypes/pd-ssd' % (self.parameters['zone']),
+                           'zone': self.parameters['zone']
+                           },
+            'type': 'compute.v1.disks',
+        }
+        content = {
+            'resources': [t, td]
+        }
+        my_data = str(yaml.dump(content))
+        # The template must be in this format:
+        # {
+        #   "name": "ansible-cycc-vm-boot-deployment",
+        #   "target": {
+        #     "config": {
+        #       "content": "resources:
+        #         - name: xxxx
+        #           properties:
+        #         ...
+        #       "
+        #     }
+        #   }
+        # }
+        gcp_deployment_template = '{\n "name": "%s%s",\n "target": {\n "config": {\n "content": "%s"\n }\n}\n}' % (
+            self.parameters['name'], '-vm-boot-deployment', my_data)
+
+        # post the deployment to the GCP deployment manager
+        api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments' % (
+            self.parameters['project_id'])
+
+        headers = {
+            'X-User-Token': self.rest_api.token_type + " " + self.rest_api.gcp_token,
+            'X-Tenancy-Account-Id': self.parameters['account_id'],
+            'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
+            'Content-type': "application/json",
+            'Referer': "Ansible_NetApp",
+            'X-Agent-Id': self.rest_api.format_client_id(client_id)
+        }
+
+        response, error, dummy = self.rest_api.post(api_url, data=gcp_deployment_template, header=headers,
+                                                    gcp_type=True)
+        if error is not None:
+            return response, client_id, error
+
+        # check occm status
+        # Sleep for 1 minutes
+        time.sleep(60)
+        retries = 16
+        while retries > 0:
+            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
+            if error is not None:
+                # the VM was created, so report changed=True and the client_id even on failure
+                self.module.fail_json(
+                    msg="Error: Not able to get occm status: %s, %s" % (str(error), str(agent)),
+                    client_id=client_id, changed=True)
+            if agent['status'] == "active":
+                break
+            else:
+                time.sleep(30)
+            retries -= 1
+        if retries == 0:
+            # Taking too long for status to be active
+            msg = "Connector VM is created and registered. Taking too long for OCCM agent to be active or not properly setup."
+            msg += ' Latest status: %s' % agent
+            self.module.fail_json(msg=msg, client_id=client_id, changed=True)
+
+        return response, client_id, error
+
+ def create_occm_gcp(self):
+ '''
+ Create Cloud Manager connector for GCP
+ '''
+ # check proxy configuration
+ if 'proxy_user_name' in self.parameters and 'proxy_url' not in self.parameters:
+ self.module.fail_json(msg="Error: missing proxy_url")
+ if 'proxy_password' in self.parameters and 'proxy_url' not in self.parameters:
+ self.module.fail_json(msg="Error: missing proxy_url")
+
+ proxy_certificates = []
+ if 'proxy_certificates' in self.parameters:
+ for c_file in self.parameters['proxy_certificates']:
+ proxy_certificate, error = self.na_helper.encode_certificates(c_file)
+ # add to proxy_certificates list
+ if error is not None:
+ self.module.fail_json(msg="Error: not able to read certificate file %s" % c_file)
+ proxy_certificates.append(proxy_certificate)
+ # region is the super class of zone. For example, zone us-east4-b is one of the zone in region us-east4
+ self.parameters['region'] = self.parameters['zone'][:-2]
+ # deploy GCP VM
+ response, client_id, error = self.deploy_gcp_vm(proxy_certificates)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: create_occm_gcp: %s, %s" % (str(error), str(response)))
+ return client_id
+
+ def delete_occm_gcp(self):
+ '''
+ Delete Cloud Manager connector for GCP
+ '''
+ api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
+ self.parameters['project_id'],
+ self.parameters['name'],
+ self.gcp_common_suffix_name)
+ headers = {
+ "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
+ 'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
+ 'X-Tenancy-Account-Id': self.parameters['account_id'],
+ 'Content-type': "application/json",
+ 'Referer': "Ansible_NetApp",
+ }
+
+ response, error, dummy = self.rest_api.delete(api_url, None, header=headers)
+ if error is not None:
+ return "Error: unexpected response on deleting VM: %s, %s" % (str(error), str(response))
+ # sleep for 30 sec
+ time.sleep(30)
+ if 'client_id' not in self.parameters:
+ return None
+ # check occm status
+ retries = 30
+ while retries > 0:
+ agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
+ if error is not None:
+ return "Error: Not able to get occm status after deleting VM: %s, %s" % (str(error), str(agent))
+ if agent['status'] != ["active", "pending"]:
+ break
+ else:
+ time.sleep(10)
+ retries -= 1 if agent['status'] == "active" else 5
+ if retries == 0 and agent['status'] == "active":
+ # Taking too long for terminating OCCM
+ return "Taking too long for instance to finish terminating. Latest status: %s" % str(agent)
+ return None
+
+ def delete_occm_agents(self, agents):
+ error = self.na_helper.delete_occm_agents(self.rest_api, agents)
+ if error:
+ return "Error: deleting OCCM agent(s): %s" % error
+ return None
+
+    def get_occm_agents(self):
+        '''
+        Find the OCCM agent(s) for this connector.
+        :return: list of agent dicts (possibly empty); failures call fail_json
+        '''
+        # when deleting and a client_id is known, look up that specific agent
+        if 'client_id' in self.parameters and self.parameters['state'] == 'absent':
+            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
+            if error == '403' and b'Action not allowed for user' in agent:
+                # assume the agent does not exist anymore
+                agents, error = [], None
+                self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
+            else:
+                agents = [agent]
+        else:
+            # otherwise find all agents matching name and provider
+            # NOTE(review): assumes account_id is set by now -- confirm callers guarantee this
+            agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
+                                                                   self.parameters['name'], 'GCP')
+        if error:
+            self.module.fail_json(
+                msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
+        return agents
+
+ def set_client_id(self, agents):
+ client_id = ""
+ client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent]
+ if len(client_ids) == 1:
+ client_id = client_ids[0]
+ self.parameters['client_id'] = client_ids[0]
+ elif 'client_id' in self.parameters and self.parameters['client_id'] in client_ids:
+ client_id = self.parameters['client_id']
+ return client_id, client_ids
+
+    def apply(self):
+        """
+        Apply action to the Cloud Manager connector for GCP
+        :return: None
+        """
+        client_id = ""
+        agents, client_ids = [], []
+        current_vm = self.get_deploy_vm()
+        # a terminated deployment is treated the same as no deployment
+        if current_vm and current_vm['operation']['status'] == 'terminated':
+            current_vm = None
+        current = current_vm
+        if self.parameters['state'] == 'absent' or current:
+            # collect matching agent records so orphaned agents can be cleaned up too
+            agents = self.get_occm_agents()
+            client_id, client_ids = self.set_client_id(agents)
+            if agents and current is None:
+                # agents exist even though the VM is gone: still something to delete
+                current = {}
+            if agents:
+                current['agents'] = agents
+
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                client_id = self.create_occm_gcp()
+            elif cd_action == 'delete':
+                # collect errors so both the VM and the agent records get a delete attempt
+                errors = []
+                if current_vm:
+                    error = self.delete_occm_gcp()
+                    if error:
+                        errors.append(error)
+                if agents:
+                    error = self.delete_occm_agents(agents)
+                    if error:
+                        errors.append(error)
+                if errors:
+                    self.module.fail_json(msg='. '.join(errors))
+
+        self.module.exit_json(changed=self.na_helper.changed, client_id=client_id, client_ids=client_ids)
+
+
+def main():
+ """
+ Create Cloud Manager connector for GCP class instance and invoke apply
+ :return: None
+ """
+ obj_store = NetAppCloudManagerConnectorGCP()
+ obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py
new file mode 100644
index 000000000..3de1ebc53
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py
@@ -0,0 +1,855 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_aws
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_aws
+short_description: NetApp Cloud Manager CVO for AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage Cloud Manager CVO for AWS.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager CVO for AWS to manage.
+ type: str
+
+ instance_type:
+ description:
+ - The instance type to use, which depends on the license type.
+ - Explore ['m5.xlarge'].
+ - Standard ['m5.2xlarge','r5.xlarge'].
+ - Premium ['m5.4xlarge','r5.2xlarge','c4.8xlarge'].
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ type: str
+ default: m5.2xlarge
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo']
+ - For single node by Node paygo ['cot-explore-paygo', 'cot-standard-paygo', 'cot-premium-paygo'].
+    - For single node by Node byol ['cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo']
+ - For HA by Node paygo ['ha-cot-explore-paygo','ha-cot-standard-paygo','ha-cot-premium-paygo'].
+    - For HA by Node byol ['ha-cot-premium-byol'].
+ choices: ['capacity-paygo', 'cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', \
+ 'ha-cot-standard-paygo', 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', \
+ 'ha-capacity-paygo']
+ default: capacity-paygo
+ type: str
+
+ provided_license:
+ description:
+ - Using a NLF license file for BYOL deployment.
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ - Essential only available with Bring Your Own License Capacity-Based.
+ - Professional available as an annual contract from AWS marketplace or Bring Your Own License Capacity-Based.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
+ - You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ subnet_id:
+ description:
+ - The subnet id where the working environment will be created. Required when single node only.
+ type: str
+
+ vpc_id:
+ description:
+ - The VPC ID where the working environment will be created.
+ - If this argument is not provided, the VPC will be calculated by using the provided subnet ID.
+ type: str
+
+ region:
+ required: true
+ description:
+ - The region where the working environment will be created.
+ type: str
+
+ data_encryption_type:
+ description:
+ - The type of encryption to use for the working environment.
+ choices: ['AWS', 'NONE']
+ default: 'AWS'
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ ebs_volume_size:
+ description:
+ - EBS volume size for the first data aggregate.
+ - For GB, the value can be [100 or 500].
+ - For TB, the value can be [1,2,4,8,16].
+ default: 1
+ type: int
+
+ ebs_volume_size_unit:
+ description:
+ - The unit for ebs volume size.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ ebs_volume_type:
+ description:
+ - The EBS volume type for the first data aggregate.
+ choices: ['gp3', 'gp2', 'io1', 'sc1', 'st1']
+ default: 'gp2'
+ type: str
+
+ security_group_id:
+ description:
+ - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
+ type: str
+
+ instance_profile_name:
+ description:
+ - The instance profile name for the working environment. If not provided, Cloud Manager creates the instance profile.
+ type: str
+
+ svm_password:
+ required: true
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ platform_serial_number:
+ description:
+ - The serial number for the cluster. This is required when using 'cot-premium-byol'.
+ type: str
+
+ tier_level:
+ description:
+ - The tiering level when 'capacity_tier' is set to 'S3'.
+ choices: ['normal', 'ia', 'ia-single', 'intelligent']
+ default: 'normal'
+ type: str
+
+ cluster_key_pair_name:
+ description:
+ - SSH authentication key pair name
+ type: str
+ version_added: 21.20.0
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account is not provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ iops:
+ description:
+ - Provisioned IOPS. Required only when provider_volume_type is 'io1' or 'gp3'.
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['S3', 'NONE']
+ default: 'S3'
+ type: str
+
+ instance_tenancy:
+ description:
+ - The EC2 instance tenancy.
+ choices: ['default', 'dedicated']
+ default: 'default'
+ type: str
+
+ cloud_provider_account:
+ description:
+ - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
+ - You can find the ID in Cloud Manager from the Settings > Credentials page.
+ - If not specified, Cloud Manager uses the instance profile of the Connector.
+ type: str
+
+ backup_volumes_to_cbs:
+ description:
+ - Automatically enable back up of all volumes to S3.
+ default: false
+ type: bool
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ enable_monitoring:
+ description:
+ - Enable the Monitoring service on the working environment.
+ default: false
+ type: bool
+
+ optimized_network_utilization:
+ description:
+ - Use optimized network utilization.
+ default: true
+ type: bool
+
+ kms_key_id:
+ description:
+    - AWS encryption parameters. It is required if using AWS encryption. Only one of KMS key id or KMS arn should be specified.
+ type: str
+
+ kms_key_arn:
+ description:
+ - AWS encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified.
+ type: str
+ version_added: 21.10.0
+
+ aws_tag:
+ description:
+ - Additional tags for the AWS CVO working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node. This is required when using 'ha-cot-premium-byol'.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node. This is required when using 'ha-cot-premium-byol'.
+ type: str
+
+ node1_subnet_id:
+ description:
+ - For HA, the subnet ID of the first node.
+ type: str
+
+ node2_subnet_id:
+ description:
+ - For HA, the subnet ID of the second node.
+ type: str
+
+ mediator_subnet_id:
+ description:
+ - For HA, the subnet ID of the mediator.
+ type: str
+
+ failover_mode:
+ description:
+ - For HA, the failover mode for the HA pair. 'PrivateIP' is for a single availability zone and 'FloatingIP' is for multiple availability zones.
+ type: str
+ choices: ['PrivateIP', 'FloatingIP']
+
+ mediator_assign_public_ip:
+ description:
+ - Boolean option to assign public IP.
+ type: bool
+ default: true
+
+ mediator_key_pair_name:
+ description:
+ - For HA, the key pair name for the mediator instance.
+ type: str
+
+ cluster_floating_ip:
+ description:
+ - For HA FloatingIP, the cluster management floating IP address.
+ type: str
+
+ data_floating_ip:
+ description:
+ - For HA FloatingIP, the data floating IP address.
+ type: str
+
+ data_floating_ip2:
+ description:
+ - For HA FloatingIP, the data floating IP address.
+ type: str
+
+ svm_floating_ip:
+ description:
+ - For HA FloatingIP, the SVM management floating IP address.
+ type: str
+
+ route_table_ids:
+ description:
+ - For HA FloatingIP, the list of route table IDs that will be updated with the floating IPs.
+ type: list
+ elements: str
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager CVO for AWS single
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ region: us-west-1
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: Create NetApp Cloud Manager CVO for AWS HA
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ region: us-west-1
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+ is_ha: true
+ failover_mode: FloatingIP
+ node1_subnet_id: subnet-1
+ node2_subnet_id: subnet-1
+ mediator_subnet_id: subnet-1
+ mediator_key_pair_name: key1
+ cluster_floating_ip: 2.1.1.1
+ data_floating_ip: 2.1.1.2
+ data_floating_ip2: 2.1.1.3
+ svm_floating_ip: 2.1.1.4
+ route_table_ids: [rt-1,rt-2]
+
+- name: Delete NetApp Cloud Manager cvo for AWS
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: absent
+ name: ansible
+ region: us-west-1
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AWS CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+IMPORT_EXCEPTION = None
+
+# boto3/botocore are optional at import time; availability is checked in
+# NetAppCloudManagerCVOAWS.__init__, which fails the module with the saved
+# import error when the libraries are missing.
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_AWS_LIB = True
+except ImportError as exc:
+    HAS_AWS_LIB = False
+    IMPORT_EXCEPTION = exc
+
+# Valid values for the license_type option (single-node and HA variants).
+AWS_License_Types = ['cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', 'ha-cot-standard-paygo',
+                     'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', 'capacity-paygo', 'ha-capacity-paygo']
+
+
+class NetAppCloudManagerCVOAWS:
+    ''' Create, delete, or update a Cloud Manager CVO (Cloud Volumes ONTAP) working environment in AWS. '''
+
+    def __init__(self):
+        """Build the argument spec, parse module parameters, and initialize the Cloud Manager REST client."""
+        self.use_rest = False
+        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+        # Module options; semantics are documented in the DOCUMENTATION block above.
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            instance_type=dict(required=False, type='str', default='m5.2xlarge'),
+            license_type=dict(required=False, type='str', choices=AWS_License_Types, default='capacity-paygo'),
+            workspace_id=dict(required=False, type='str'),
+            subnet_id=dict(required=False, type='str'),
+            vpc_id=dict(required=False, type='str'),
+            region=dict(required=True, type='str'),
+            data_encryption_type=dict(required=False, type='str', choices=['AWS', 'NONE'], default='AWS'),
+            # NOTE(review): default is the string '1' for an int option; Ansible coerces it, but int 1 would be cleaner.
+            ebs_volume_size=dict(required=False, type='int', default='1'),
+            ebs_volume_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
+            ebs_volume_type=dict(required=False, type='str', choices=['gp3', 'gp2', 'io1', 'sc1', 'st1'], default='gp2'),
+            svm_password=dict(required=True, type='str', no_log=True),
+            svm_name=dict(required=False, type='str'),
+            ontap_version=dict(required=False, type='str', default='latest'),
+            use_latest_version=dict(required=False, type='bool', default=True),
+            platform_serial_number=dict(required=False, type='str'),
+            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
+            provided_license=dict(required=False, type='str'),
+            tier_level=dict(required=False, type='str', choices=['normal', 'ia', 'ia-single', 'intelligent'], default='normal'),
+            cluster_key_pair_name=dict(required=False, type='str'),
+            nss_account=dict(required=False, type='str'),
+            writing_speed_state=dict(required=False, type='str'),
+            iops=dict(required=False, type='int'),
+            throughput=dict(required=False, type='int'),
+            capacity_tier=dict(required=False, type='str', choices=['S3', 'NONE'], default='S3'),
+            instance_tenancy=dict(required=False, type='str', choices=['default', 'dedicated'], default='default'),
+            instance_profile_name=dict(required=False, type='str'),
+            security_group_id=dict(required=False, type='str'),
+            cloud_provider_account=dict(required=False, type='str'),
+            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
+            enable_compliance=dict(required=False, type='bool', default=False),
+            enable_monitoring=dict(required=False, type='bool', default=False),
+            optimized_network_utilization=dict(required=False, type='bool', default=True),
+            kms_key_id=dict(required=False, type='str', no_log=True),
+            kms_key_arn=dict(required=False, type='str', no_log=True),
+            client_id=dict(required=True, type='str'),
+            aws_tag=dict(required=False, type='list', elements='dict', options=dict(
+                tag_key=dict(type='str', no_log=False),
+                tag_value=dict(type='str')
+            )),
+            is_ha=dict(required=False, type='bool', default=False),
+            platform_serial_number_node1=dict(required=False, type='str'),
+            platform_serial_number_node2=dict(required=False, type='str'),
+            failover_mode=dict(required=False, type='str', choices=['PrivateIP', 'FloatingIP']),
+            mediator_assign_public_ip=dict(required=False, type='bool', default=True),
+            node1_subnet_id=dict(required=False, type='str'),
+            node2_subnet_id=dict(required=False, type='str'),
+            mediator_subnet_id=dict(required=False, type='str'),
+            mediator_key_pair_name=dict(required=False, type='str'),
+            cluster_floating_ip=dict(required=False, type='str'),
+            data_floating_ip=dict(required=False, type='str'),
+            data_floating_ip2=dict(required=False, type='str'),
+            svm_floating_ip=dict(required=False, type='str'),
+            route_table_ids=dict(required=False, type='list', elements='str'),
+            upgrade_ontap_version=dict(required=False, type='bool', default=False),
+            update_svm_password=dict(required=False, type='bool', default=False),
+        ))
+
+        # Cross-option constraints: volume-type-specific IOPS/throughput and
+        # license-type-specific serial numbers / capacity package.
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_if=[
+                ['ebs_volume_type', 'gp3', ['iops', 'throughput']],
+                ['ebs_volume_type', 'io1', ['iops']],
+                ['license_type', 'cot-premium-byol', ['platform_serial_number']],
+                ['license_type', 'ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
+                ['license_type', 'capacity-paygo', ['capacity_package_name']],
+                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
+            ],
+            required_one_of=[['refresh_token', 'sa_client_id']],
+            mutually_exclusive=[['kms_key_id', 'kms_key_arn']],
+            required_together=[['sa_client_id', 'sa_secret_key']],
+            supports_check_mode=True,
+        )
+
+        if HAS_AWS_LIB is False:
+            # NOTE(review): the two concatenated literals produce "...boto3.Import error:" with no space between sentences.
+            self.module.fail_json(msg="the python AWS library boto3 and botocore is required. Command is pip install boto3."
+                                      "Import error: %s" % str(IMPORT_EXCEPTION))
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Options that can be changed on an existing working environment (consumed in update_cvo_aws).
+        self.changeable_params = ['aws_tag', 'svm_password', 'svm_name', 'tier_level', 'ontap_version', 'instance_type', 'license_type', 'writing_speed_state']
+        self.rest_api = CloudManagerRestAPI(self.module)
+        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+        # HA pairs and single nodes are served by different API roots.
+        self.rest_api.api_root_path = '/occm/api/%s' % ('aws/ha' if self.parameters['is_ha'] else 'vsa')
+        # Every request is routed to the selected Connector via the agent id header.
+        self.headers = {
+            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+        }
+
+    def get_vpc(self):
+        """
+        Look up the VPC of self.parameters['subnet_id'] with EC2 describe_subnets.
+        Fails the module on any AWS client error.
+        :return: vpc ID
+        """
+        vpc_result = None
+        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
+
+        vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}
+
+        try:
+            vpc_result = ec2.describe_subnets(**vpc_input)
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        return vpc_result['Subnets'][0]['VpcId']
+
+    def create_cvo_aws(self):
+        """ Create AWS CVO and wait for the deployment to complete; return the working environment id. """
+        # Default the workspace to the tenant's first workspace when not provided.
+        if self.parameters.get('workspace_id') is None:
+            response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
+            if response is None:
+                self.module.fail_json(msg)
+            self.parameters['workspace_id'] = response
+
+        # Single node only: derive the VPC from the subnet when not provided.
+        if self.parameters.get('vpc_id') is None and self.parameters['is_ha'] is False:
+            self.parameters['vpc_id'] = self.get_vpc()
+
+        # For non-Eval BYOL licenses, fall back to the first NSS account on file.
+        if self.parameters.get('nss_account') is None:
+            if self.parameters.get('platform_serial_number') is not None:
+                if not self.parameters['platform_serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'cot-premium-byol':
+                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+                    if response is None:
+                        self.module.fail_json(msg)
+                    self.parameters['nss_account'] = response
+            elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
+                if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
+                        and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
+                        and self.parameters['license_type'] == 'ha-cot-premium-byol':
+                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+                    if response is None:
+                        self.module.fail_json(msg)
+                    self.parameters['nss_account'] = response
+
+        # Base request body; optional fields are appended below only when set.
+        json = {"name": self.parameters['name'],
+                "region": self.parameters['region'],
+                "tenantId": self.parameters['workspace_id'],
+                "vpcId": self.parameters['vpc_id'],
+                "dataEncryptionType": self.parameters['data_encryption_type'],
+                "ebsVolumeSize": {
+                    "size": self.parameters['ebs_volume_size'],
+                    "unit": self.parameters['ebs_volume_size_unit']},
+                "ebsVolumeType": self.parameters['ebs_volume_type'],
+                "svmPassword": self.parameters['svm_password'],
+                "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
+                "enableCompliance": self.parameters['enable_compliance'],
+                "enableMonitoring": self.parameters['enable_monitoring'],
+                "optimizedNetworkUtilization": self.parameters['optimized_network_utilization'],
+                "vsaMetadata": {
+                    "ontapVersion": self.parameters['ontap_version'],
+                    "licenseType": self.parameters['license_type'],
+                    "useLatestVersion": self.parameters['use_latest_version'],
+                    "instanceType": self.parameters['instance_type']},
+                }
+
+        if self.parameters['capacity_tier'] == "S3":
+            json.update({"capacityTier": self.parameters['capacity_tier'],
+                         "tierLevel": self.parameters['tier_level']})
+
+        # clean default value if it is not by Capacity license
+        if not self.parameters['license_type'].endswith('capacity-paygo'):
+            json['vsaMetadata'].update({"capacityPackageName": ''})
+
+        if self.parameters.get('platform_serial_number') is not None:
+            json['vsaMetadata'].update({"platformSerialNumber": self.parameters['platform_serial_number']})
+
+        if self.parameters.get('provided_license') is not None:
+            json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
+
+        if self.parameters.get('capacity_package_name') is not None:
+            json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
+
+        if self.parameters.get('writing_speed_state') is not None:
+            json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})
+
+        if self.parameters.get('iops') is not None:
+            json.update({"iops": self.parameters['iops']})
+
+        if self.parameters.get('throughput') is not None:
+            json.update({"throughput": self.parameters['throughput']})
+
+        if self.parameters.get('cluster_key_pair_name') is not None:
+            json.update({"clusterKeyPairName": self.parameters['cluster_key_pair_name']})
+
+        if self.parameters.get('instance_tenancy') is not None:
+            json.update({"instanceTenancy": self.parameters['instance_tenancy']})
+
+        if self.parameters.get('instance_profile_name') is not None:
+            json.update({"instanceProfileName": self.parameters['instance_profile_name']})
+
+        if self.parameters.get('security_group_id') is not None:
+            json.update({"securityGroupId": self.parameters['security_group_id']})
+
+        if self.parameters.get('cloud_provider_account') is not None:
+            json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})
+
+        if self.parameters.get('backup_volumes_to_cbs') is not None:
+            json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})
+
+        if self.parameters.get('svm_name') is not None:
+            json.update({"svmName": self.parameters['svm_name']})
+
+        # KMS key id and arn are mutually exclusive (enforced in the argument spec).
+        if self.parameters['data_encryption_type'] == "AWS":
+            if self.parameters.get('kms_key_id') is not None:
+                json.update({"awsEncryptionParameters": {"kmsKeyId": self.parameters['kms_key_id']}})
+            if self.parameters.get('kms_key_arn') is not None:
+                json.update({"awsEncryptionParameters": {"kmsKeyArn": self.parameters['kms_key_arn']}})
+
+        # Translate the module's tag_key/tag_value dicts to the API's tagKey/tagValue shape.
+        if self.parameters.get('aws_tag') is not None:
+            tags = []
+            for each_tag in self.parameters['aws_tag']:
+                tag = {
+                    'tagKey': each_tag['tag_key'],
+                    'tagValue': each_tag['tag_value']
+                }
+
+                tags.append(tag)
+            json.update({"awsTags": tags})
+
+        # HA deployments carry their network layout in a nested haParams object;
+        # single-node deployments pass the subnet at the top level instead.
+        if self.parameters['is_ha'] is True:
+            ha_params = dict({
+                "mediatorAssignPublicIP": self.parameters['mediator_assign_public_ip']
+            })
+
+            if self.parameters.get('failover_mode'):
+                ha_params["failoverMode"] = self.parameters['failover_mode']
+
+            if self.parameters.get('node1_subnet_id'):
+                ha_params["node1SubnetId"] = self.parameters['node1_subnet_id']
+
+            if self.parameters.get('node2_subnet_id'):
+                ha_params["node2SubnetId"] = self.parameters['node2_subnet_id']
+
+            if self.parameters.get('mediator_subnet_id'):
+                ha_params["mediatorSubnetId"] = self.parameters['mediator_subnet_id']
+
+            if self.parameters.get('mediator_key_pair_name'):
+                ha_params["mediatorKeyPairName"] = self.parameters['mediator_key_pair_name']
+
+            if self.parameters.get('cluster_floating_ip'):
+                ha_params["clusterFloatingIP"] = self.parameters['cluster_floating_ip']
+
+            if self.parameters.get('data_floating_ip'):
+                ha_params["dataFloatingIP"] = self.parameters['data_floating_ip']
+
+            if self.parameters.get('data_floating_ip2'):
+                ha_params["dataFloatingIP2"] = self.parameters['data_floating_ip2']
+
+            if self.parameters.get('svm_floating_ip'):
+                ha_params["svmFloatingIP"] = self.parameters['svm_floating_ip']
+
+            if self.parameters.get('route_table_ids'):
+                ha_params["routeTableIds"] = self.parameters['route_table_ids']
+
+            if self.parameters.get('platform_serial_number_node1'):
+                ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
+
+            if self.parameters.get('platform_serial_number_node2'):
+                ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
+
+            json["haParams"] = ha_params
+
+        else:
+            json["subnetId"] = self.parameters['subnet_id']
+
+        # Submit the creation request, then poll the returned task until done.
+        api_url = '%s/working-environments' % self.rest_api.api_root_path
+        response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
+        if error is not None:
+            self.module.fail_json(
+                msg="Error: unexpected response on creating cvo aws: %s, %s" % (str(error), str(response)))
+        working_environment_id = response['publicId']
+        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+
+        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
+
+        if err is not None:
+            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AWS: %s" % str(err))
+
+        return working_environment_id
+
+    def update_cvo_aws(self, working_environment_id, modify):
+        """Apply each changed option in ``modify`` to the existing working environment; fail fast on any error."""
+        base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+        for item in modify:
+            if item == 'svm_password':
+                response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'svm_name':
+                response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'aws_tag':
+                tag_list = None
+                if 'aws_tag' in self.parameters:
+                    tag_list = self.parameters['aws_tag']
+                response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'aws_tag', tag_list)
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'tier_level':
+                response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'writing_speed_state':
+                response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'ontap_version':
+                # NOTE(review): unlike the other helpers, upgrade_ontap_image is not passed base_url — confirm it derives the target itself.
+                response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            # instance type and license type are updated through a single API call.
+            if item == 'instance_type' or item == 'license_type':
+                response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+                                                                              self.parameters['instance_type'],
+                                                                              self.parameters['license_type'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+
+    def delete_cvo_aws(self, we_id):
+        """
+        Delete AWS CVO identified by working environment id ``we_id`` and wait for completion.
+        """
+        api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+        response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+        if error is not None:
+            self.module.fail_json(msg="Error: unexpected response on deleting cvo aws: %s, %s" % (str(error), str(response)))
+
+        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+
+        if err is not None:
+            self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AWS: %s" % str(err))
+
+    def validate_cvo_params(self):
+        """Reject inconsistent option combinations and normalize the HA capacity license type."""
+        if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
+            self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
+
+        if self.parameters['is_ha'] is True and self.parameters['license_type'] == "ha-cot-premium-byol":
+            if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
+                # NOTE(review): the concatenated literals yield "...required" + "when having..." with no separating space.
+                self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required"
+                                          "when having ha type as true and license_type as ha-cot-premium-byol")
+
+        # Users may pass the single-node capacity license for an HA pair; translate it.
+        if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+            self.parameters['license_type'] = 'ha-capacity-paygo'
+
+    def apply(self):
+        """
+        Apply action to the Cloud Manager CVO for AWS: create, delete, or update,
+        then exit with the working environment id.
+        :return: None
+        """
+        working_environment_id = None
+        modify = None
+        # Existence is determined by looking the working environment up by name.
+        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
+                                                                                self.parameters['name'], "aws")
+        if current:
+            self.parameters['working_environment_id'] = current['publicId']
+        # check the action
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+        if current and self.parameters['state'] != 'absent':
+            # Check mandatory parameters
+            self.validate_cvo_params()
+            working_environment_id = current['publicId']
+            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'aws')
+            if error is not None:
+                self.module.fail_json(changed=False, msg=error)
+
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == "create":
+                self.validate_cvo_params()
+                working_environment_id = self.create_cvo_aws()
+            elif cd_action == "delete":
+                self.delete_cvo_aws(current['publicId'])
+            else:
+                self.update_cvo_aws(current['publicId'], modify)
+
+        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
+def main():
+    """
+    Create Cloud Manager CVO for AWS class instance and invoke apply
+    :return: None
+    """
+    obj_store = NetAppCloudManagerCVOAWS()
+    obj_store.apply()
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py
new file mode 100644
index 000000000..3212323e0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py
@@ -0,0 +1,746 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_azure
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_azure
+short_description: NetApp Cloud Manager CVO/working environment in single or HA mode for Azure.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage Cloud Manager CVO/working environment in single or HA mode for Azure.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for AZURE should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager CVO for AZURE to manage.
+ type: str
+
+ subscription_id:
+ required: true
+ description:
+ - The ID of the Azure subscription.
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance to use, which depends on the license type you chose.
+ - Explore ['Standard_DS3_v2'].
+ - Standard ['Standard_DS4_v2, Standard_DS13_v2, Standard_L8s_v2'].
+ - Premium ['Standard_DS5_v2', 'Standard_DS14_v2'].
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ type: str
+ default: Standard_DS4_v2
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo'].
+ - For single node by Node paygo ['azure-cot-explore-paygo', 'azure-cot-standard-paygo', 'azure-cot-premium-paygo'].
+ - For single node by Node byol ['azure-cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo'].
+ - For HA by Node paygo ['azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo'].
+ - For HA by Node byol ['azure-ha-cot-premium-byol'].
+ choices: ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', \
+ 'azure-cot-explore-paygo', 'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', \
+ 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
+ default: 'capacity-paygo'
+ type: str
+
+ provided_license:
+ description:
+ - Using a NLF license file for BYOL deployment.
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ - Essential only available with Bring Your Own License Capacity-Based.
+ - Professional available as an annual contract from a cloud provider or Bring Your Own License Capacity-Based.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
+ - You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com].
+ type: str
+
+ subnet_id:
+ required: true
+ description:
+ - The name of the subnet for the Cloud Volumes ONTAP system.
+ type: str
+
+ vnet_id:
+ required: true
+ description:
+ - The name of the virtual network.
+ type: str
+
+ vnet_resource_group:
+ description:
+ - The resource group in Azure associated to the virtual network.
+ type: str
+
+ resource_group:
+ description:
+ - The resource_group where Cloud Volumes ONTAP will be created.
+ - If not provided, Cloud Manager generates the resource group name (name of the working environment/CVO with suffix '-rg').
+ - If the resource group does not exist, it is created.
+ type: str
+
+ allow_deploy_in_existing_rg:
+ description:
+ - Indicates if to allow creation in existing resource group.
+ type: bool
+ default: false
+
+ cidr:
+ required: true
+ description:
+ - The CIDR of the VNET. If not provided, resource needs az login to authorize and fetch the cidr details from Azure.
+ type: str
+
+ location:
+ required: true
+ description:
+ - The location where the working environment will be created.
+ type: str
+
+ data_encryption_type:
+ description:
+ - The type of encryption to use for the working environment.
+ choices: ['AZURE', 'NONE']
+ default: 'AZURE'
+ type: str
+
+ azure_encryption_parameters:
+ description:
+ - AZURE encryption parameters. It is required if using AZURE encryption.
+ type: str
+ version_added: 21.10.0
+
+ storage_type:
+ description:
+ - The type of storage for the first data aggregate.
+ choices: ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS']
+ default: 'Premium_LRS'
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on [https://cloudmanager.netapp.com].
+ type: str
+
+ disk_size:
+ description:
+ - Azure volume size for the first data aggregate.
+ - For GB, the value can be [100, 500].
+ - For TB, the value can be [1,2,4,8,16].
+ default: 1
+ type: int
+
+ disk_size_unit:
+ description:
+ - The unit for disk size.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ security_group_id:
+ description:
+ - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
+ type: str
+
+ svm_password:
+ required: true
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ serial_number:
+ description:
+ - The serial number for the cluster.
+ - Required when using one of these, 'azure-cot-premium-byol' or 'azure-ha-cot-premium-byol'.
+ type: str
+
+ tier_level:
+ description:
+ - If capacity_tier is Blob, this argument indicates the tiering level.
+ choices: ['normal', 'cool']
+ default: 'normal'
+ type: str
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['Blob', 'NONE']
+ default: 'Blob'
+ type: str
+
+ cloud_provider_account:
+ description:
+ - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
+ - You can find the ID in Cloud Manager from the Settings > Credentials page.
+ - If not specified, Cloud Manager uses the instance profile of the Connector.
+ type: str
+
+ backup_volumes_to_cbs:
+ description:
+      - Automatically enable backup of all volumes to Azure Blob storage.
+ default: false
+ type: bool
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ enable_monitoring:
+ description:
+ - Enable the Monitoring service on the working environment.
+ default: false
+ type: bool
+
+ azure_tag:
+ description:
+ - Additional tags for the AZURE CVO working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node.
+ type: str
+
+ ha_enable_https:
+ description:
+ - For HA, enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false.
+ type: bool
+ version_added: 21.10.0
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ availability_zone:
+ description:
+ - The availability zone on the location configuration.
+ type: int
+ version_added: 21.20.0
+
+ availability_zone_node1:
+ description:
+ - The node1 availability zone on the location configuration for HA.
+ type: int
+ version_added: 21.21.0
+
+ availability_zone_node2:
+ description:
+ - The node2 availability zone on the location configuration for HA.
+ type: int
+ version_added: 21.21.0
+'''
+
+EXAMPLES = """
+- name: create NetApp Cloud Manager CVO for Azure single
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ azure_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: create NetApp Cloud Manager CVO for Azure HA
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ azure_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+ is_ha: true
+
+- name: delete NetApp Cloud Manager cvo for Azure
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: absent
+ name: ansible
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AZURE CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
# All license types accepted by the Azure CVO API; the 'azure-ha-' / 'ha-'
# prefixed entries are only meaningful when is_ha is true.
AZURE_License_Types = ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', 'azure-cot-explore-paygo',
                       'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
+
+
+class NetAppCloudManagerCVOAZURE:
+ """ object initialize and class methods """
+
    def __init__(self):
        """Build the Ansible argument spec, enforce option interplay, and
        initialize the Cloud Manager REST client for the AZURE CVO module."""
        self.use_rest = False
        # Start from the shared cloudmanager auth options (refresh_token, sa_client_id, ...).
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            instance_type=dict(required=False, type='str', default='Standard_DS4_v2'),
            license_type=dict(required=False, type='str', choices=AZURE_License_Types, default='capacity-paygo'),
            workspace_id=dict(required=False, type='str'),
            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
            provided_license=dict(required=False, type='str'),
            subnet_id=dict(required=True, type='str'),
            vnet_id=dict(required=True, type='str'),
            vnet_resource_group=dict(required=False, type='str'),
            resource_group=dict(required=False, type='str'),
            cidr=dict(required=True, type='str'),
            location=dict(required=True, type='str'),
            subscription_id=dict(required=True, type='str'),
            data_encryption_type=dict(required=False, type='str', choices=['AZURE', 'NONE'], default='AZURE'),
            azure_encryption_parameters=dict(required=False, type='str', no_log=True),
            storage_type=dict(required=False, type='str', choices=['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS'], default='Premium_LRS'),
            disk_size=dict(required=False, type='int', default=1),
            disk_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
            svm_password=dict(required=True, type='str', no_log=True),
            svm_name=dict(required=False, type='str'),
            ontap_version=dict(required=False, type='str', default='latest'),
            use_latest_version=dict(required=False, type='bool', default=True),
            tier_level=dict(required=False, type='str', choices=['normal', 'cool'], default='normal'),
            nss_account=dict(required=False, type='str'),
            writing_speed_state=dict(required=False, type='str'),
            capacity_tier=dict(required=False, type='str', choices=['Blob', 'NONE'], default='Blob'),
            security_group_id=dict(required=False, type='str'),
            cloud_provider_account=dict(required=False, type='str'),
            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
            enable_compliance=dict(required=False, type='bool', default=False),
            enable_monitoring=dict(required=False, type='bool', default=False),
            allow_deploy_in_existing_rg=dict(required=False, type='bool', default=False),
            client_id=dict(required=True, type='str'),
            azure_tag=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            serial_number=dict(required=False, type='str'),
            is_ha=dict(required=False, type='bool', default=False),
            platform_serial_number_node1=dict(required=False, type='str'),
            platform_serial_number_node2=dict(required=False, type='str'),
            ha_enable_https=dict(required=False, type='bool'),
            upgrade_ontap_version=dict(required=False, type='bool', default=False),
            update_svm_password=dict(required=False, type='bool', default=False),
            availability_zone=dict(required=False, type='int'),
            availability_zone_node1=dict(required=False, type='int'),
            availability_zone_node2=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # Capacity-based and BYOL license types each require extra options.
            required_if=[
                ['license_type', 'capacity-paygo', ['capacity_package_name']],
                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
                ['license_type', 'azure-cot-premium-byol', ['serial_number']],
                ['license_type', 'azure-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Attributes that may be changed on an existing CVO; drives is_cvo_update_needed().
        self.changeable_params = ['svm_password', 'svm_name', 'azure_tag', 'tier_level', 'ontap_version',
                                  'instance_type', 'license_type', 'writing_speed_state']
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # HA and single-node CVOs live under different API roots.
        self.rest_api.api_root_path = '/occm/api/azure/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
+
    def create_cvo_azure(self):
        """
        Create an AZURE CVO working environment.

        Resolves defaults (workspace, NSS account) from Cloud Manager when not
        supplied, assembles the JSON payload expected by the working-environments
        API, posts it, and waits for the create task to complete.

        :return: the publicId of the newly created working environment
        """
        # Default the workspace to the tenant's first workspace when not given.
        if self.parameters.get('workspace_id') is None:
            response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
            if response is None:
                self.module.fail_json(msg)
            self.parameters['workspace_id'] = response

        # For non-Eval BYOL deployments, look up an NSS account if none was provided.
        if self.parameters.get('nss_account') is None:
            if self.parameters.get('serial_number') is not None:
                if not self.parameters['serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'azure-cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response
            elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
                if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
                        and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
                        and self.parameters['license_type'] == 'azure-ha-cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response

        # Mandatory portion of the create payload (keys follow the API wire format).
        json = {"name": self.parameters['name'],
                "region": self.parameters['location'],
                "subscriptionId": self.parameters['subscription_id'],
                "tenantId": self.parameters['workspace_id'],
                "storageType": self.parameters['storage_type'],
                "dataEncryptionType": self.parameters['data_encryption_type'],
                "optimizedNetworkUtilization": True,
                "diskSize": {
                    "size": self.parameters['disk_size'],
                    "unit": self.parameters['disk_size_unit']},
                "svmPassword": self.parameters['svm_password'],
                "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
                "enableCompliance": self.parameters['enable_compliance'],
                "enableMonitoring": self.parameters['enable_monitoring'],
                "vsaMetadata": {
                    "ontapVersion": self.parameters['ontap_version'],
                    "licenseType": self.parameters['license_type'],
                    "useLatestVersion": self.parameters['use_latest_version'],
                    "instanceType": self.parameters['instance_type']}
                }

        if self.parameters['capacity_tier'] == "Blob":
            json.update({"capacityTier": self.parameters['capacity_tier'],
                         "tierLevel": self.parameters['tier_level']})

        if self.parameters.get('provided_license') is not None:
            json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})

        # clean default value if it is not by Capacity license
        if not self.parameters['license_type'].endswith('capacity-paygo'):
            json['vsaMetadata'].update({"capacityPackageName": ''})

        if self.parameters.get('capacity_package_name') is not None:
            json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})

        # Optional payload entries, added only when the matching option was supplied.
        if self.parameters.get('cidr') is not None:
            json.update({"cidr": self.parameters['cidr']})

        if self.parameters.get('writing_speed_state') is not None:
            json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})

        if self.parameters.get('resource_group') is not None:
            json.update({"resourceGroup": self.parameters['resource_group'],
                         "allowDeployInExistingRg": self.parameters['allow_deploy_in_existing_rg']})
        else:
            # Cloud Manager's default resource group name is '<name>-rg'.
            json.update({"resourceGroup": (self.parameters['name'] + '-rg')})

        if self.parameters.get('serial_number') is not None:
            json.update({"serialNumber": self.parameters['serial_number']})

        if self.parameters.get('security_group_id') is not None:
            json.update({"securityGroupId": self.parameters['security_group_id']})

        if self.parameters.get('cloud_provider_account') is not None:
            json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})

        if self.parameters.get('backup_volumes_to_cbs') is not None:
            json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})

        if self.parameters.get('nss_account') is not None:
            json.update({"nssAccount": self.parameters['nss_account']})

        if self.parameters.get('availability_zone') is not None:
            json.update({"availabilityZone": self.parameters['availability_zone']})

        if self.parameters['data_encryption_type'] == "AZURE":
            if self.parameters.get('azure_encryption_parameters') is not None:
                json.update({"azureEncryptionParameters": {"key": self.parameters['azure_encryption_parameters']}})

        if self.parameters.get('svm_name') is not None:
            json.update({"svmName": self.parameters['svm_name']})

        # Convert the module's tag_key/tag_value pairs to the API's tagKey/tagValue form.
        if self.parameters.get('azure_tag') is not None:
            tags = []
            for each_tag in self.parameters['azure_tag']:
                tag = {
                    'tagKey': each_tag['tag_key'],
                    'tagValue': each_tag['tag_value']
                }

                tags.append(tag)
            json.update({"azureTags": tags})

        # HA deployments carry an extra haParams section.
        if self.parameters['is_ha']:
            ha_params = dict()

            if self.parameters.get('platform_serial_number_node1'):
                ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']

            if self.parameters.get('platform_serial_number_node2'):
                ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']

            if self.parameters.get('availability_zone_node1'):
                ha_params["availabilityZoneNode1"] = self.parameters['availability_zone_node1']

            if self.parameters.get('availability_zone_node2'):
                ha_params["availabilityZoneNode2"] = self.parameters['availability_zone_node2']

            if self.parameters.get('ha_enable_https') is not None:
                ha_params['enableHttps'] = self.parameters['ha_enable_https']

            json["haParams"] = ha_params

        # Build the fully-qualified vnet/subnet resource IDs.
        # NOTE(review): when neither vnet_resource_group nor resource_group is set,
        # resource_group is None and the path below embeds 'None' — confirm callers
        # always provide one of them in that case.
        resource_group = self.parameters['vnet_resource_group'] if self.parameters.get(
            'vnet_resource_group') is not None else self.parameters['resource_group']

        resource_group_path = 'subscriptions/%s/resourceGroups/%s' % (self.parameters['subscription_id'], resource_group)
        vnet_format = '%s/%s' if self.rest_api.simulator else '/%s/providers/Microsoft.Network/virtualNetworks/%s'
        vnet = vnet_format % (resource_group_path, self.parameters['vnet_id'])
        json.update({"vnetId": vnet})
        json.update({"subnetId": '%s/subnets/%s' % (vnet, self.parameters['subnet_id'])})

        api_url = '%s/working-environments' % self.rest_api.api_root_path
        response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating cvo azure: %s, %s" % (str(error), str(response)))
        working_environment_id = response['publicId']
        # Block until the asynchronous create task finishes (60 retries x 60s).
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)

        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AZURE: %s" % str(err))

        return working_environment_id
+
+ def get_extra_azure_tags(self, rest_api, headers):
+ # Get extra azure tag from current working environment
+ # It is created automatically not from the user input
+ we, err = self.na_helper.get_working_environment_details(rest_api, headers)
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response to get CVO AZURE details: %s" % str(err))
+ return [{'tag_key': 'DeployedByOccm', 'tag_value': we['userTags']['DeployedByOccm']}] if 'DeployedByOccm' in \
+ we['userTags'] else []
+
+ def update_cvo_azure(self, working_environment_id, modify):
+ base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+ for item in modify:
+ if item == 'svm_password':
+ response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'svm_name':
+ response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'azure_tag':
+ # default azure tag
+ tag_list = self.get_extra_azure_tags(self.rest_api, self.headers)
+ if 'azure_tag' in self.parameters:
+ tag_list.extend(self.parameters['azure_tag'])
+ response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'azure_tag', tag_list)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'tier_level':
+ response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'writing_speed_state':
+ response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'ontap_version':
+ response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'instance_type' or item == 'license_type':
+ response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+ self.parameters['instance_type'],
+ self.parameters['license_type'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def delete_cvo_azure(self, we_id):
+ """
+ Delete AZURE CVO
+ """
+
+ api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+ response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on deleting cvo azure: %s, %s" % (str(error), str(response)))
+
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AZURE: %s" % str(err))
+
+ def validate_cvo_params(self):
+ if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
+ self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
+
+ if self.parameters.get('serial_number') is None and self.parameters['license_type'] == "azure-cot-premium-byol":
+ self.module.fail_json(msg="serial_number parameter required when having license_type as azure-cot-premium-byol")
+
+ if self.parameters['is_ha'] and self.parameters['license_type'] == "azure-ha-cot-premium-byol":
+ if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
+ self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required"
+ "when having ha type as true and license_type as azure-ha-cot-premium-byol")
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+ self.parameters['license_type'] == 'ha-capacity-paygo'
+
+ def apply(self):
+ """
+ Apply action to the Cloud Manager CVO for AZURE
+ :return: None
+ """
+ working_environment_id = None
+ modify = None
+ current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
+ self.parameters['name'], "azure")
+ if current:
+ self.parameters['working_environment_id'] = current['publicId']
+ # check the action whether to create, delete, or not
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if current and self.parameters['state'] != 'absent':
+ working_environment_id = current['publicId']
+ modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'azure')
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == "create":
+ self.validate_cvo_params()
+ working_environment_id = self.create_cvo_azure()
+ elif cd_action == "delete":
+ self.delete_cvo_azure(current['publicId'])
+ else:
+ self.update_cvo_azure(current['publicId'], modify)
+
+ self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """Instantiate the AZURE CVO module object and run its apply action."""
    cvo = NetAppCloudManagerCVOAZURE()
    cvo.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py
new file mode 100644
index 000000000..7abbca823
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_gcp
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_gcp
+short_description: NetApp Cloud Manager CVO for GCP
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, delete, or manage Cloud Manager CVO for GCP.
+
+options:
+
+ backup_volumes_to_cbs:
+ description:
+ - Automatically backup all volumes to cloud.
+ default: false
+ type: bool
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['cloudStorage']
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ data_encryption_type:
+ description:
+ - Type of encryption to use for this working environment.
+ choices: ['GCP']
+ type: str
+
+ gcp_encryption_parameters:
+ description:
+ - The GCP encryption parameters.
+ type: str
+ version_added: 21.10.0
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ firewall_rule:
+ description:
+ - Firewall name for a single node cluster.
+ type: str
+
+ gcp_labels:
+ description:
+    - Optionally provide up to four key-value pairs with which to tag all GCP entities created by Cloud Manager.
+ type: list
+ elements: dict
+ suboptions:
+ label_key:
+ description: The key of the label.
+ type: str
+ label_value:
+ description: The label value.
+ type: str
+
+ gcp_service_account:
+ description:
+ - The gcp_service_account email in order to enable tiering of cold data to Google Cloud Storage.
+ required: true
+ type: str
+
+ gcp_volume_size:
+ description:
+ - GCP volume size.
+ type: int
+
+ gcp_volume_size_unit:
+ description:
+ - GCP volume size unit.
+ choices: ['GB', 'TB']
+ type: str
+
+ gcp_volume_type:
+ description:
+ - GCP volume type.
+ choices: ['pd-balanced', 'pd-standard', 'pd-ssd']
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance to use, which depends on the license type you choose.
+ - Explore ['custom-4-16384'].
+ - Standard ['n1-standard-8'].
+ - Premium ['n1-standard-32'].
+ - BYOL all instance types defined for PayGo.
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ default: 'n1-standard-8'
+ type: str
+
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo'].
+ - For single node by Node paygo ['gcp-cot-explore-paygo', 'gcp-cot-standard-paygo', 'gcp-cot-premium-paygo'].
+ - For single node by Node byol ['gcp-cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo'].
+ - For HA by Node paygo ['gcp-ha-cot-explore-paygo', 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo'].
+    - For HA by Node byol ['gcp-ha-cot-premium-byol'].
+ choices: ['gcp-cot-standard-paygo', 'gcp-cot-explore-paygo', 'gcp-cot-premium-paygo', 'gcp-cot-premium-byol', \
+ 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo', 'gcp-ha-cot-explore-paygo', 'gcp-ha-cot-premium-byol', \
+ 'capacity-paygo', 'ha-capacity-paygo']
+ type: str
+ default: 'capacity-paygo'
+
+ provided_license:
+ description:
+ - Using a NLF license file for BYOL deployment
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ mediator_zone:
+ description:
+ - The zone for mediator.
+ - Option for HA pair only.
+ type: str
+
+ name:
+ description:
+ - The name of the Cloud Manager CVO for GCP to manage.
+ required: true
+ type: str
+
+ network_project_id:
+ description:
+ - The project id in GCP associated with the Subnet.
+ - If not provided, it is assumed that the Subnet is within the previously specified project id.
+ type: str
+
+ node1_zone:
+ description:
+ - Zone for node 1.
+ - Option for HA pair only.
+ type: str
+
+ node2_zone:
+ description:
+ - Zone for node 2.
+ - Option for HA pair only.
+ type: str
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node.
+ - Option for HA pair only.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node.
+ - Option for HA pair only.
+ type: str
+
+ project_id:
+ description:
+ - The ID of the GCP project.
+ required: true
+ type: str
+
+ platform_serial_number:
+ description:
+ - The serial number for the system. Required when using 'gcp-cot-premium-byol'.
+ type: str
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for GCP should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ subnet_id:
+ description:
+ - The name of the subnet for Cloud Volumes ONTAP.
+ type: str
+
+ subnet0_node_and_data_connectivity:
+ description:
+ - Subnet path for nic1, required for node and data connectivity.
+ - If using shared VPC, network_project_id must be provided.
+ - Option for HA pair only.
+ type: str
+
+ subnet1_cluster_connectivity:
+ description:
+ - Subnet path for nic2, required for cluster connectivity.
+ - Option for HA pair only.
+ type: str
+
+ subnet2_ha_connectivity:
+ description:
+ - Subnet path for nic3, required for HA connectivity.
+ - Option for HA pair only.
+ type: str
+
+ subnet3_data_replication:
+ description:
+    - Subnet path for nic4, required for data replication.
+ - Option for HA pair only.
+ type: str
+
+ svm_password:
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ tier_level:
+ description:
+ - The tiering level when 'capacity_tier' is set to 'cloudStorage'.
+ choices: ['standard', 'nearline', 'coldline']
+ default: 'standard'
+ type: str
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ vpc_id:
+ required: true
+ description:
+ - The name of the VPC.
+ type: str
+
+ vpc0_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc1.
+ - Option for HA pair only.
+ type: str
+
+ vpc0_node_and_data_connectivity:
+ description:
+ - VPC path for nic1, required for node and data connectivity.
+ - If using shared VPC, network_project_id must be provided.
+ - Option for HA pair only.
+ type: str
+
+ vpc1_cluster_connectivity:
+ description:
+ - VPC path for nic2, required for cluster connectivity.
+ - Option for HA pair only.
+ type: str
+
+ vpc1_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc2.
+ - Option for HA pair only.
+ type: str
+
+ vpc2_ha_connectivity:
+ description:
+ - VPC path for nic3, required for HA connectivity.
+ - Option for HA pair only.
+ type: str
+
+ vpc2_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc3.
+ - Option for HA pair only.
+ type: str
+
+ vpc3_data_replication:
+ description:
+ - VPC path for nic4, required for data replication.
+ - Option for HA pair only.
+ type: str
+
+ vpc3_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc4.
+ - Option for HA pair only.
+ type: str
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
+    - You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - Default value is 'NORMAL' for non-HA GCP CVO
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ zone:
+ description:
+ - The zone of the region where the working environment will be created.
+ required: true
+ type: str
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ subnet_path:
+ description:
+ - Subnet path for a single node cluster.
+ type: str
+ version_added: 21.20.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+
+- name: Create NetApp Cloud Manager cvo for GCP
+ netapp.cloudmanager.na_cloudmanager_cvo_gcp:
+ state: present
+ name: ansiblecvogcp
+ project_id: default-project
+ zone: us-east4-b
+ subnet_path: projects/<project>/regions/<region>/subnetworks/<subnetwork>
+ subnet_id: projects/<project>/regions/<region>/subnetworks/<subnetwork>
+ gcp_volume_type: pd-ssd
+ gcp_volume_size: 500
+ gcp_volume_size_unit: GB
+ gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
+ data_encryption_type: GCP
+ svm_password: "{{ xxxxxxxxxxxxxxx }}"
+ ontap_version: latest
+ use_latest_version: true
+ license_type: capacity-paygo
+ instance_type: n1-standard-8
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ workspace_id: "{{ xxxxxxxxxxxxxxx }}"
+ capacity_tier: cloudStorage
+ writing_speed_state: NORMAL
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ vpc_id: default
+ gcp_labels:
+ - label_key: key1
+ label_value: value1
+ - label_key: key2
+ label_value: value2
+
+- name: Create NetApp Cloud Manager cvo ha for GCP
+ netapp.cloudmanager.na_cloudmanager_cvo_gcp:
+ state: present
+ name: ansiblecvogcpha
+ project_id: "default-project"
+ zone: us-east1-b
+ gcp_volume_type: pd-ssd
+ gcp_volume_size: 500
+ gcp_volume_size_unit: GB
+ gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
+ data_encryption_type: GCP
+ svm_password: "{{ xxxxxxxxxxxxxxx }}"
+ ontap_version: ONTAP-9.9.0.T1.gcpha
+ use_latest_version: false
+ license_type: ha-capacity-paygo
+ instance_type: custom-4-16384
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ workspace_id: "{{ xxxxxxxxxxxxxxx }}"
+ capacity_tier: cloudStorage
+ writing_speed_state: NORMAL
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ is_ha: true
+ mediator_zone: us-east1-b
+ node1_zone: us-east1-b
+ node2_zone: us-east1-b
+ subnet0_node_and_data_connectivity: default
+ subnet1_cluster_connectivity: subnet2
+ subnet2_ha_connectivity: subnet3
+ subnet3_data_replication: subnet1
+ vpc0_node_and_data_connectivity: default
+ vpc1_cluster_connectivity: vpc2
+ vpc2_ha_connectivity: vpc3
+ vpc3_data_replication: vpc1
+ vpc_id: default
+ subnet_id: default
+
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created GCP CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
+GCP_LICENSE_TYPES = ["gcp-cot-standard-paygo", "gcp-cot-explore-paygo", "gcp-cot-premium-paygo", "gcp-cot-premium-byol",
+ "gcp-ha-cot-standard-paygo", "gcp-ha-cot-premium-paygo", "gcp-ha-cot-explore-paygo",
+ "gcp-ha-cot-premium-byol", "capacity-paygo", "ha-capacity-paygo"]
+GOOGLE_API_URL = "https://www.googleapis.com/compute/v1/projects"
+
+
+class NetAppCloudManagerCVOGCP:
+ ''' object initialize and class methods '''
+
+    def __init__(self):
+        """Set up the module argument spec, validation rules, and REST client for GCP CVO."""
+        self.use_rest = False
+        # start from the shared Cloud Manager authentication options, then add GCP specific ones
+        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+        self.argument_spec.update(dict(
+            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
+            capacity_tier=dict(required=False, type='str', choices=['cloudStorage']),
+            client_id=dict(required=True, type='str'),
+            data_encryption_type=dict(required=False, choices=['GCP'], type='str'),
+            gcp_encryption_parameters=dict(required=False, type='str', no_log=True),
+            enable_compliance=dict(required=False, type='bool', default=False),
+            firewall_rule=dict(required=False, type='str'),
+            gcp_labels=dict(required=False, type='list', elements='dict', options=dict(
+                label_key=dict(type='str', no_log=False),
+                label_value=dict(type='str')
+            )),
+            gcp_service_account=dict(required=True, type='str'),
+            gcp_volume_size=dict(required=False, type='int'),
+            gcp_volume_size_unit=dict(required=False, choices=['GB', 'TB'], type='str'),
+            gcp_volume_type=dict(required=False, choices=['pd-balanced', 'pd-standard', 'pd-ssd'], type='str'),
+            instance_type=dict(required=False, type='str', default='n1-standard-8'),
+            is_ha=dict(required=False, type='bool', default=False),
+            license_type=dict(required=False, type='str', choices=GCP_LICENSE_TYPES, default='capacity-paygo'),
+            mediator_zone=dict(required=False, type='str'),
+            name=dict(required=True, type='str'),
+            network_project_id=dict(required=False, type='str'),
+            node1_zone=dict(required=False, type='str'),
+            node2_zone=dict(required=False, type='str'),
+            nss_account=dict(required=False, type='str'),
+            ontap_version=dict(required=False, type='str', default='latest'),
+            platform_serial_number=dict(required=False, type='str'),
+            platform_serial_number_node1=dict(required=False, type='str'),
+            platform_serial_number_node2=dict(required=False, type='str'),
+            project_id=dict(required=True, type='str'),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            subnet_id=dict(required=False, type='str'),
+            subnet0_node_and_data_connectivity=dict(required=False, type='str'),
+            subnet1_cluster_connectivity=dict(required=False, type='str'),
+            subnet2_ha_connectivity=dict(required=False, type='str'),
+            subnet3_data_replication=dict(required=False, type='str'),
+            svm_password=dict(required=False, type='str', no_log=True),
+            svm_name=dict(required=False, type='str'),
+            tier_level=dict(required=False, type='str', choices=['standard', 'nearline', 'coldline'],
+                            default='standard'),
+            use_latest_version=dict(required=False, type='bool', default=True),
+            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
+            provided_license=dict(required=False, type='str'),
+            vpc_id=dict(required=True, type='str'),
+            vpc0_firewall_rule_name=dict(required=False, type='str'),
+            vpc0_node_and_data_connectivity=dict(required=False, type='str'),
+            vpc1_cluster_connectivity=dict(required=False, type='str'),
+            vpc1_firewall_rule_name=dict(required=False, type='str'),
+            vpc2_firewall_rule_name=dict(required=False, type='str'),
+            vpc2_ha_connectivity=dict(required=False, type='str'),
+            vpc3_data_replication=dict(required=False, type='str'),
+            vpc3_firewall_rule_name=dict(required=False, type='str'),
+            workspace_id=dict(required=False, type='str'),
+            writing_speed_state=dict(required=False, type='str'),
+            zone=dict(required=True, type='str'),
+            upgrade_ontap_version=dict(required=False, type='bool', default=False),
+            update_svm_password=dict(required=False, type='bool', default=False),
+            subnet_path=dict(required=False, type='str'),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            # authentication: either a refresh token or a service account client id/secret pair
+            required_one_of=[['refresh_token', 'sa_client_id']],
+            required_together=[['sa_client_id', 'sa_secret_key']],
+            # BYOL licenses need serial numbers; capacity licenses need a package name
+            required_if=[
+                ['license_type', 'capacity-paygo', ['capacity_package_name']],
+                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
+                ['license_type', 'gcp-cot-premium-byol', ['platform_serial_number']],
+                ['license_type', 'gcp-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
+            ],
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # options that can be modified on an existing CVO without recreating it
+        self.changeable_params = ['svm_password', 'svm_name', 'tier_level', 'gcp_labels', 'ontap_version',
+                                  'instance_type', 'license_type', 'writing_speed_state']
+        self.rest_api = CloudManagerRestAPI(self.module)
+        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+        # HA and single-node CVOs use different API root paths
+        self.rest_api.api_root_path = '/occm/api/gcp/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
+        self.headers = {
+            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+        }
+
+ @staticmethod
+ def has_self_link(param):
+ return param.startswith(("https://www.googleapis.com/compute/", "projects/"))
+
+ def create_cvo_gcp(self):
+
+ if self.parameters.get('workspace_id') is None:
+ response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['workspace_id'] = response
+
+ if self.parameters.get('nss_account') is None:
+ if self.parameters.get('platform_serial_number') is not None:
+ if not self.parameters['platform_serial_number'].startswith('Eval-'):
+ if self.parameters['license_type'] == 'gcp-cot-premium-byol' or self.parameters['license_type'] == 'gcp-ha-cot-premium-byol':
+ response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['nss_account'] = response
+
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+ self.parameters['license_type'] == 'ha-capacity-paygo'
+
+ json = {"name": self.parameters['name'],
+ "region": self.parameters['zone'],
+ "tenantId": self.parameters['workspace_id'],
+ "vpcId": self.parameters['vpc_id'],
+ "gcpServiceAccount": self.parameters['gcp_service_account'],
+ "gcpVolumeSize": {
+ "size": self.parameters['gcp_volume_size'],
+ "unit": self.parameters['gcp_volume_size_unit']},
+ "gcpVolumeType": self.parameters['gcp_volume_type'],
+ "svmPassword": self.parameters['svm_password'],
+ "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
+ "enableCompliance": self.parameters['enable_compliance'],
+ "vsaMetadata": {
+ "ontapVersion": self.parameters['ontap_version'],
+ "licenseType": self.parameters['license_type'],
+ "useLatestVersion": self.parameters['use_latest_version'],
+ "instanceType": self.parameters['instance_type']}
+ }
+
+ if self.parameters['is_ha'] is False:
+ if self.parameters.get('writing_speed_state') is None:
+ self.parameters['writing_speed_state'] = 'NORMAL'
+ json.update({'writingSpeedState': self.parameters['writing_speed_state'].upper()})
+
+ if self.parameters.get('data_encryption_type') is not None and self.parameters['data_encryption_type'] == "GCP":
+ json.update({'dataEncryptionType': self.parameters['data_encryption_type']})
+ if self.parameters.get('gcp_encryption_parameters') is not None:
+ json.update({"gcpEncryptionParameters": {"key": self.parameters['gcp_encryption_parameters']}})
+
+ if self.parameters.get('provided_license') is not None:
+ json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
+
+ # clean default value if it is not by Capacity license
+ if not self.parameters['license_type'].endswith('capacity-paygo'):
+ json['vsaMetadata'].update({"capacityPackageName": ''})
+
+ if self.parameters.get('capacity_package_name') is not None:
+ json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
+
+ if self.parameters.get('project_id'):
+ json.update({'project': self.parameters['project_id']})
+
+ if self.parameters.get('nss_account'):
+ json.update({'nssAccount': self.parameters['nss_account']})
+
+ if self.parameters.get('subnet_id'):
+ json.update({'subnetId': self.parameters['subnet_id']})
+
+ if self.parameters.get('subnet_path'):
+ json.update({'subnetPath': self.parameters['subnet_path']})
+
+ if self.parameters.get('platform_serial_number') is not None:
+ json.update({"serialNumber": self.parameters['platform_serial_number']})
+
+ if self.parameters.get('capacity_tier') is not None and self.parameters['capacity_tier'] == "cloudStorage":
+ json.update({"capacityTier": self.parameters['capacity_tier'],
+ "tierLevel": self.parameters['tier_level']})
+
+ if self.parameters.get('svm_name') is not None:
+ json.update({"svmName": self.parameters['svm_name']})
+
+ if self.parameters.get('gcp_labels') is not None:
+ labels = []
+ for each_label in self.parameters['gcp_labels']:
+ label = {
+ 'labelKey': each_label['label_key'],
+ 'labelValue': each_label['label_value']
+ }
+
+ labels.append(label)
+ json.update({"gcpLabels": labels})
+
+ if self.parameters.get('firewall_rule'):
+ json.update({'firewallRule': self.parameters['firewall_rule']})
+
+ if self.parameters['is_ha'] is True:
+ ha_params = dict()
+
+ if self.parameters.get('network_project_id') is not None:
+ network_project_id = self.parameters.get('network_project_id')
+ else:
+ network_project_id = self.parameters['project_id']
+
+ if not self.has_self_link(self.parameters['subnet_id']):
+ json.update({'subnetId': 'projects/%s/regions/%s/subnetworks/%s' % (network_project_id,
+ self.parameters['zone'][:-2],
+ self.parameters['subnet_id'])})
+
+ if self.parameters.get('platform_serial_number_node1'):
+ ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
+
+ if self.parameters.get('platform_serial_number_node2'):
+ ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
+
+ if self.parameters.get('node1_zone'):
+ ha_params["node1Zone"] = self.parameters['node1_zone']
+
+ if self.parameters.get('node2_zone'):
+ ha_params["node2Zone"] = self.parameters['node2_zone']
+
+ if self.parameters.get('mediator_zone'):
+ ha_params["mediatorZone"] = self.parameters['mediator_zone']
+
+ if self.parameters.get('vpc0_node_and_data_connectivity'):
+ if self.has_self_link(self.parameters['vpc0_node_and_data_connectivity']):
+ ha_params["vpc0NodeAndDataConnectivity"] = self.parameters['vpc0_node_and_data_connectivity']
+ else:
+ ha_params["vpc0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc0_node_and_data_connectivity'])
+
+ if self.parameters.get('vpc1_cluster_connectivity'):
+ if self.has_self_link(self.parameters['vpc1_cluster_connectivity']):
+ ha_params["vpc1ClusterConnectivity"] = self.parameters['vpc1_cluster_connectivity']
+ else:
+ ha_params["vpc1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc1_cluster_connectivity'])
+
+ if self.parameters.get('vpc2_ha_connectivity'):
+ if self.has_self_link(self.parameters['vpc2_ha_connectivity']):
+ ha_params["vpc2HAConnectivity"] = self.parameters['vpc2_ha_connectivity']
+ else:
+ ha_params["vpc2HAConnectivity"] = "https://www.googleapis.com/compute/v1/projects/{0}/global/networks" \
+ "/{1}".format(network_project_id, self.parameters['vpc2_ha_connectivity'])
+
+ if self.parameters.get('vpc3_data_replication'):
+ if self.has_self_link(self.parameters['vpc3_data_replication']):
+ ha_params["vpc3DataReplication"] = self.parameters['vpc3_data_replication']
+ else:
+ ha_params["vpc3DataReplication"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc3_data_replication'])
+
+ if self.parameters.get('subnet0_node_and_data_connectivity'):
+ if self.has_self_link(self.parameters['subnet0_node_and_data_connectivity']):
+ ha_params["subnet0NodeAndDataConnectivity"] = self.parameters['subnet0_node_and_data_connectivity']
+ else:
+ ha_params["subnet0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".\
+ format(network_project_id, self.parameters['zone'][:-2], self.parameters['subnet0_node_and_data_connectivity'])
+
+ if self.parameters.get('subnet1_cluster_connectivity'):
+ if self.has_self_link(self.parameters['subnet1_cluster_connectivity']):
+ ha_params["subnet1ClusterConnectivity"] = self.parameters['subnet1_cluster_connectivity']
+ else:
+ ha_params["subnet1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
+ network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet1_cluster_connectivity'])
+
+ if self.parameters.get('subnet2_ha_connectivity'):
+ if self.has_self_link(self.parameters['subnet2_ha_connectivity']):
+ ha_params["subnet2HAConnectivity"] = self.parameters['subnet2_ha_connectivity']
+ else:
+ ha_params["subnet2HAConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
+ network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet2_ha_connectivity'])
+
+ if self.parameters.get('subnet3_data_replication'):
+ if self.has_self_link(self.parameters['subnet3_data_replication']):
+ ha_params["subnet3DataReplication"] = self.parameters['subnet3_data_replication']
+ else:
+ ha_params["subnet3DataReplication"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}". \
+ format(network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet3_data_replication'])
+
+ if self.parameters.get('vpc0_firewall_rule_name'):
+ ha_params["vpc0FirewallRuleName"] = self.parameters['vpc0_firewall_ruleName']
+
+ if self.parameters.get('vpc1_firewall_rule_name'):
+ ha_params["vpc1FirewallRuleName"] = self.parameters['vpc1_firewall_rule_name']
+
+ if self.parameters.get('vpc2_firewall_rule_name'):
+ ha_params["vpc2FirewallRuleName"] = self.parameters['vpc2_firewall_rule_name']
+
+ if self.parameters.get('vpc3_firewall_rule_name'):
+ ha_params["vpc3FirewallRuleName"] = self.parameters['vpc3_firewall_rule_name']
+
+ json["haParams"] = ha_params
+
+ api_url = '%s/working-environments' % self.rest_api.api_root_path
+ response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: unexpected response on creating cvo gcp: %s, %s" % (str(error), str(response)))
+ working_environment_id = response['publicId']
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO GCP: %s" % str(err))
+ return working_environment_id
+
+    def update_cvo_gcp(self, working_environment_id, modify):
+        """
+        Apply the modifications listed in 'modify' to an existing GCP CVO.
+        :param working_environment_id: publicId of the working environment to update
+        :param modify: list of parameter names that need to change
+        """
+        base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+        for item in modify:
+            if item == 'svm_password':
+                response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'svm_name':
+                response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'gcp_labels':
+                # pass None when no labels were supplied so the helper can clear tags
+                tag_list = None
+                if 'gcp_labels' in self.parameters:
+                    tag_list = self.parameters['gcp_labels']
+                response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'gcp_labels', tag_list)
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'tier_level':
+                response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'writing_speed_state':
+                response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'ontap_version':
+                response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+            if item == 'instance_type' or item == 'license_type':
+                # instance type and license type are changed through the same API call
+                response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+                                                                              self.parameters['instance_type'],
+                                                                              self.parameters['license_type'])
+                if error is not None:
+                    self.module.fail_json(changed=False, msg=error)
+
+ def delete_cvo_gcp(self, we_id):
+ """
+ Delete GCP CVO
+ """
+ api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+ response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on deleting cvo gcp: %s, %s" % (str(error), str(response)))
+
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting cvo gcp: %s" % str(err))
+
+    def apply(self):
+        """
+        Apply the requested state to the Cloud Manager CVO for GCP:
+        create, delete, or update the working environment as needed.
+        :return: None (exits the module with changed status and working_environment_id)
+        """
+        working_environment_id = None
+        modify = None
+
+        # look up the working environment by name; 'current' is None when it does not exist yet
+        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
+                                                                                self.parameters['name'], "gcp")
+        if current:
+            self.parameters['working_environment_id'] = current['publicId']
+        # check the action
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+        if current and self.parameters['state'] != 'absent':
+            working_environment_id = current['publicId']
+            # compare the existing CVO with the requested options; 'modify' lists what changed
+            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'gcp')
+            if error is not None:
+                self.module.fail_json(changed=False, msg=error)
+
+        # perform the action unless running in check mode
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == "create":
+                working_environment_id = self.create_cvo_gcp()
+            elif cd_action == "delete":
+                self.delete_cvo_gcp(current['publicId'])
+            else:
+                self.update_cvo_gcp(current['publicId'], modify)
+
+        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
+def main():
+ """
+ Create Cloud Manager CVO for GCP class instance and invoke apply
+ :return: None
+ """
+ obj_store = NetAppCloudManagerCVOGCP()
+ obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py
new file mode 100644
index 000000000..cbdf64f13
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_info
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_info
+short_description: NetApp Cloud Manager info
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - This module allows you to gather various information about cloudmanager using REST APIs.
+
+options:
+ client_id:
+ required: true
+ type: str
+ description:
+ - The connector ID of the Cloud Manager Connector.
+
+ gather_subsets:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected to a given subset.
+ - Possible values for this argument include
+ - 'working_environments_info'
+ - 'aggregates_info'
+ - 'accounts_info'
+ - 'account_info'
+ - 'agents_info'
+ - 'active_agents_info'
+ default: 'all'
+
+notes:
+- Support check_mode
+'''
+
+EXAMPLES = """
+- name: Get all available subsets
+ netapp.cloudmanager.na_cloudmanager_info:
+ client_id: "{{ client_id }}"
+ refresh_token: "{{ refresh_token }}"
+ gather_subsets:
+ - all
+
+- name: Collect data for cloud manager with indicated subsets
+ netapp.cloudmanager.na_cloudmanager_info:
+ client_id: "{{ client_id }}"
+ refresh_token: "{{ refresh_token }}"
+ gather_subsets:
+ - aggregates_info
+ - working_environments_info
+"""
+
+RETURN = """
+info:
+ description:
+ - a dictionary of collected subsets
+    - each subset is in JSON format
+ returned: success
+ type: dict
+ sample: '{
+ "info": {
+ "working_environments_info": [
+ {
+ "azureVsaWorkingEnvironments": [],
+ "gcpVsaWorkingEnvironments": [],
+ "onPremWorkingEnvironments": [],
+ "vsaWorkingEnvironments": [
+ {
+ "actionsRequired": null,
+ "activeActions": null,
+ "awsProperties": null,
+ "capacityFeatures": null,
+ "cbsProperties": null,
+ "cloudProviderName": "Amazon",
+ "cloudSyncProperties": null,
+ "clusterProperties": null,
+ "complianceProperties": null,
+ "creatorUserEmail": "samlp|NetAppSAML|test_user",
+ "cronJobSchedules": null,
+ "encryptionProperties": null,
+ "fpolicyProperties": null,
+ "haProperties": null,
+ "interClusterLifs": null,
+ "isHA": false,
+ "k8sProperties": null,
+ "monitoringProperties": null,
+ "name": "testAWS",
+ "ontapClusterProperties": null,
+ "publicId": "VsaWorkingEnvironment-3txYJOsX",
+ "replicationProperties": null,
+ "reservedSize": null,
+ "saasProperties": null,
+ "schedules": null,
+ "snapshotPolicies": null,
+ "status": null,
+ "supportRegistrationInformation": [],
+ "supportRegistrationProperties": null,
+ "supportedFeatures": null,
+ "svmName": "svm_testAWS",
+ "svms": null,
+ "tenantId": "Tenant-2345",
+ "workingEnvironmentType": "VSA"
+ }
+ ]
+ },
+ null
+ ]
+ }
+ }'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudmanagerInfo(object):
    '''
    Contains methods to parse arguments,
    derive details of CloudmanagerInfo objects
    and send requests to CloudmanagerInfo via
    the restApi
    '''

    def __init__(self):
        """Parse module arguments, build the REST client, and set up the subset dispatch table."""
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            # NOTE(review): default is the string 'all' for a list-typed option;
            # Ansible coerces it to ['all'] -- confirm before changing to a list literal.
            gather_subsets=dict(type='list', elements='str', default='all'),
            client_id=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = None
        # dispatch table: gather_subset name -> callable(rest_api, headers)
        self.methods = dict(
            working_environments_info=self.na_helper.get_working_environments_info,
            aggregates_info=self.get_aggregates_info,
            accounts_info=self.na_helper.get_accounts_info,
            account_info=self.na_helper.get_account_info,
            agents_info=self.na_helper.get_agents_info,
            active_agents_info=self.na_helper.get_active_agents_info,
        )
        # route every request through the given connector agent
        self.headers = {}
        if 'client_id' in self.parameters:
            self.headers['X-Agent-Id'] = self.rest_api.format_client_id(self.parameters['client_id'])

    def get_aggregates_info(self, rest_api, headers):
        '''
        Get aggregates info: there are 4 types of working environments.
        Each of the aggregates will be categorized by working environment type and working environment id
        '''
        aggregates = {}
        # get list of working environments
        working_environments, error = self.na_helper.get_working_environments_info(rest_api, headers)
        if error is not None:
            self.module.fail_json(msg="Error: Failed to get working environments: %s" % str(error))
        # Four types of working environments:
        # azureVsaWorkingEnvironments, gcpVsaWorkingEnvironments, onPremWorkingEnvironments, vsaWorkingEnvironments
        for working_env_type in working_environments:
            we_aggregates = {}
            # get aggregates for each working environment
            for we in working_environments[working_env_type]:
                provider = we['cloudProviderName']
                working_environment_id = we['publicId']
                # point the REST client at the provider-specific API root for this WE
                self.na_helper.set_api_root_path(we, rest_api)
                # Amazon uses a query parameter, the other providers a path segment
                if provider != "Amazon":
                    api = '%s/aggregates/%s' % (rest_api.api_root_path, working_environment_id)
                else:
                    api = '%s/aggregates?workingEnvironmentId=%s' % (rest_api.api_root_path, working_environment_id)
                response, error, dummy = rest_api.get(api, None, header=headers)
                if error:
                    self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
                we_aggregates[working_environment_id] = response
            aggregates[working_env_type] = we_aggregates
        return aggregates

    def get_info(self, func, rest_api):
        '''
        Main get info function
        '''
        # look up the collector for the requested subset and invoke it
        return self.methods[func](rest_api, self.headers)

    def apply(self):
        '''
        Apply action to the Cloud Manager
        :return: None
        '''
        info = {}
        # 'all' expands to every known subset
        if 'all' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.methods.keys()
        for func in self.parameters['gather_subsets']:
            if func in self.methods:
                info[func] = self.get_info(func, self.rest_api)
            else:
                msg = '%s is not a valid gather_subset. Only %s are allowed' % (func, self.methods.keys())
                self.module.fail_json(msg=msg)
        self.module.exit_json(changed=False, info=info)
+
+
def main():
    """Entry point: gather the requested Cloud Manager info subsets and exit."""
    info_module = NetAppCloudmanagerInfo()
    info_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py
new file mode 100644
index 000000000..49e8e697e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_nss_account
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_nss_account
+short_description: NetApp Cloud Manager nss account
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create and Delete nss account.
+
+options:
+ state:
+ description:
+ - Whether the specified nss account should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ public_id:
+ description:
+ - The ID of the NSS account.
+ type: str
+
+ name:
+ description:
+ - The name of the NSS account.
+ type: str
+
+ username:
+ description:
+ - The NSS username.
+ required: true
+ type: str
+
+ password:
+ description:
+ - The NSS password.
+ type: str
+
+ vsa_list:
+ description:
+ - The working environment list.
+ type: list
+ elements: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create nss account
+ netapp.cloudmanager.na_cloudmanager_nss_account:
+ state: present
+ name: test_cloud
+ username: test_cloud
+ password: password
+ client_id: your_client_id
+ refresh_token: your_refresh_token
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppCloudmanagerNssAccount(object):
    """Manage a NetApp Support Site (NSS) account in Cloud Manager (create/delete)."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=False, type='str', no_log=True),
            public_id=dict(required=False, type='str'),
            vsa_list=dict(required=False, type='list', elements='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # password is only needed when the account must exist
            required_if=[
                ('state', 'present', ['password']),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = '/occm/api/'
        # route every request through the given connector agent
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_nss_account(self):
        """Return the NSS account matching 'username' as a dict, or None if not found.

        Side effect: stores the account's publicId in self.parameters['public_id']
        when none was supplied; fails the module if a supplied public_id conflicts
        with the account found for the username.
        """
        response, err, dummy = self.rest_api.send_request("GET", "%s/accounts" % (
            self.rest_api.api_root_path), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on getting nss account: %s, %s" % (str(err), str(response)))
        if response is None:
            return None
        nss_accounts = []
        if response.get('nssAccounts'):
            nss_accounts = response['nssAccounts']
        if len(nss_accounts) == 0:
            return None
        result = dict()
        for account in nss_accounts:
            if account['nssUserName'] == self.parameters['username']:
                if self.parameters.get('public_id') and self.parameters['public_id'] != account['publicId']:
                    self.module.fail_json(changed=False, msg="Error: public_id '%s' does not match username."
                                                             % account['publicId'])
                else:
                    self.parameters['public_id'] = account['publicId']
                    result['name'] = account['accountName']
                    result['user_name'] = account['nssUserName']
                    result['vsa_list'] = account['vsaList']
                    return result
        return None

    def create_nss_account(self):
        """Create the NSS account from username/password (and optional name / vsa_list)."""
        account = dict()
        if self.parameters.get('name'):
            account['accountName'] = self.parameters['name']
        account['providerKeys'] = {'nssUserName': self.parameters['username'],
                                   'nssPassword': self.parameters['password']}
        account['vsaList'] = []
        if self.parameters.get('vsa_list'):
            account['vsaList'] = self.parameters['vsa_list']
        response, err, second_dummy = self.rest_api.send_request("POST", "%s/accounts/nss" % (
            self.rest_api.api_root_path), None, account, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on creating nss account: %s, %s" % (str(err), str(response)))

    def delete_nss_account(self):
        """Delete the NSS account identified by self.parameters['public_id']."""
        response, err, second_dummy = self.rest_api.send_request("DELETE", "%s/accounts/%s" % (
            self.rest_api.api_root_path, self.parameters['public_id']), None, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on deleting nss account: %s, %s" % (str(err), str(response)))
        return None

    def apply(self):
        """Create or delete the NSS account to match the requested state, then exit."""
        current = self.get_nss_account()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_nss_account()
            elif cd_action == 'delete':
                self.delete_nss_account()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Entry point: create the NSS account module object and apply the requested state."""
    nss_account = NetAppCloudmanagerNssAccount()
    nss_account.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py
new file mode 100644
index 000000000..299e13ecf
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_snapmirror
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_snapmirror
+short_description: NetApp Cloud Manager SnapMirror
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.6.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or Delete SnapMirror relationship on Cloud Manager.
+
+options:
+
+ state:
+ description:
+ - Whether the specified snapmirror relationship should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ source_working_environment_name:
+ description:
+ - The working environment name of the source volume.
+ type: str
+
+ destination_working_environment_name:
+ description:
+ - The working environment name of the destination volume.
+ type: str
+
+ source_working_environment_id:
+ description:
+ - The public ID of the working environment of the source volume.
+ type: str
+
+ destination_working_environment_id:
+ description:
+ - The public ID of the working environment of the destination volume.
+ type: str
+
+ destination_aggregate_name:
+ description:
+ - The aggregate in which the volume will be created.
+ - If not provided, Cloud Manager chooses the best aggregate for you.
+ type: str
+
+ policy:
+ description:
+ - The SnapMirror policy name.
+ type: str
+ default: 'MirrorAllSnapshots'
+
+ max_transfer_rate:
+ description:
+ - Maximum transfer rate limit KB/s.
+ - Use 0 for no limit, otherwise use number between 1024 and 2,147,482,624.
+ type: int
+ default: 100000
+
+ source_svm_name:
+ description:
+ - The name of the source SVM.
+ - The default SVM name is used, if a name is not provided.
+ type: str
+
+ destination_svm_name:
+ description:
+ - The name of the destination SVM.
+ - The default SVM name is used, if a name is not provided.
+ type: str
+
+ source_volume_name:
+ description:
+ - The name of the source volume.
+ required: true
+ type: str
+
+ destination_volume_name:
+ description:
+ - The name of the destination volume to be created for snapmirror relationship.
+ required: true
+ type: str
+
+ schedule:
+ description:
+ - The name of the Schedule.
+ type: str
+ default: '1hour'
+
+ provider_volume_type:
+ description:
+ - The underlying cloud provider volume type.
+ - For AWS ['gp3', 'gp2', 'io1', 'st1', 'sc1'].
+ - For Azure ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
+ - For GCP ['pd-balanced','pd-ssd','pd-standard'].
+ type: str
+
+ capacity_tier:
+ description:
+ - The volume capacity tier for tiering cold data to object storage.
+ - The default values for each cloud provider are as follows, Amazon 'S3', Azure 'Blob', GCP 'cloudStorage'.
+ - If NONE, the capacity tier will not be set on volume creation.
+ type: str
+ choices: ['S3', 'Blob', 'cloudStorage', 'NONE']
+
+ tenant_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
+ type: str
+ version_added: 21.14.0
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create snapmirror with working_environment_name
+ netapp.cloudmanager.na_cloudmanager_snapmirror:
+ state: present
+ source_working_environment_name: source
+ destination_working_environment_name: dest
+ source_volume_name: source
+ destination_volume_name: source_copy
+ policy: MirrorAllSnapshots
+ schedule: 5min
+ max_transfer_rate: 102400
+ client_id: client_id
+ refresh_token: refresh_token
+
+- name: Delete snapmirror
+ netapp.cloudmanager.na_cloudmanager_snapmirror:
+ state: absent
+ source_working_environment_name: source
+ destination_working_environment_name: dest
+ source_volume_name: source
+ destination_volume_name: source_copy
+ client_id: client_id
+ refresh_token: refresh_token
+'''
+
+RETURN = r''' # '''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
+PROVIDER_TO_CAPACITY_TIER = {'amazon': 'S3', 'azure': 'Blob', 'gcp': 'cloudStorage'}
+
+
class NetAppCloudmanagerSnapmirror:
    """Manage a Cloud Manager SnapMirror relationship (create or delete)."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            source_working_environment_id=dict(required=False, type='str'),
            destination_working_environment_id=dict(required=False, type='str'),
            source_working_environment_name=dict(required=False, type='str'),
            destination_working_environment_name=dict(required=False, type='str'),
            destination_aggregate_name=dict(required=False, type='str'),
            policy=dict(required=False, type='str', default='MirrorAllSnapshots'),
            # NOTE(review): default is a string for an int-typed option; Ansible
            # coerces it, but a plain int default would be cleaner -- confirm.
            max_transfer_rate=dict(required=False, type='int', default='100000'),
            schedule=dict(required=False, type='str', default='1hour'),
            source_svm_name=dict(required=False, type='str'),
            destination_svm_name=dict(required=False, type='str'),
            source_volume_name=dict(required=True, type='str'),
            destination_volume_name=dict(required=True, type='str'),
            capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
            provider_volume_type=dict(required=False, type='str'),
            tenant_id=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['source_working_environment_id', 'source_working_environment_name'],
                ['refresh_token', 'sa_client_id'],
            ],
            # source/destination must be given the same way (both ids or both names)
            required_together=(['source_working_environment_id', 'destination_working_environment_id'],
                               ['source_working_environment_name', 'destination_working_environment_name'],
                               ['sa_client_id', 'sa_secret_key'],
                               ),
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = None
        # route every request through the given connector agent
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
        if self.rest_api.simulator:
            self.headers.update({'x-simulator': 'true'})

    def get_snapmirror(self):
        """Return the existing relationship for destination_volume_name as a dict, or None."""
        source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)

        get_url = '/occm/api/replication/status/%s' % source_we_info['publicId']
        snapmirror_info, err, dummy = self.rest_api.send_request("GET", get_url, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting snapmirror relationship %s: %s.' % (err, snapmirror_info))
        sm_found = False
        snapmirror = None
        # match the relationship by its destination volume name
        for sm in snapmirror_info:
            if sm['destination']['volumeName'] == self.parameters['destination_volume_name']:
                sm_found = True
                snapmirror = sm
                break

        if not sm_found:
            return None
        result = {
            'source_working_environment_id': source_we_info['publicId'],
            'destination_svm_name': snapmirror['destination']['svmName'],
            'destination_working_environment_id': dest_we_info['publicId'],
        }
        # an 'fs-' prefixed public id denotes FSx, which has no cloudProviderName here
        if not dest_we_info['publicId'].startswith('fs-'):
            result['cloud_provider_name'] = dest_we_info['cloudProviderName']
        return result

    def create_snapmirror(self):
        """Create the SnapMirror relationship.

        Resolves source/destination working environments, finds the source volume,
        quotes the destination volume when needed, posts the replication request to
        the provider-appropriate endpoint, and waits for the async task to finish.
        """
        snapmirror_build_data = {}
        replication_request = {}
        replication_volume = {}
        source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        if self.parameters.get('capacity_tier') is not None:
            if self.parameters['capacity_tier'] == 'NONE':
                self.parameters.pop('capacity_tier')
            else:
                # NOTE(review): a user-supplied tier is overwritten by the provider's
                # default mapping whenever cloudProviderName is present -- confirm intent.
                if dest_we_info.get('cloudProviderName'):
                    self.parameters['capacity_tier'] = PROVIDER_TO_CAPACITY_TIER[dest_we_info['cloudProviderName'].lower()]

        interclusterlifs_info = self.get_interclusterlifs(source_we_info['publicId'], dest_we_info['publicId'])

        # on-prem working environments use a different volume listing endpoint
        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
            source_volumes = self.get_volumes(source_we_info, self.parameters['source_volume_name'])
        else:
            source_volumes = self.get_volumes_on_prem(source_we_info, self.parameters['source_volume_name'])

        if len(source_volumes) == 0:
            self.module.fail_json(changed=False, msg='source volume not found')

        vol_found = False
        vol_dest_quote = {}
        source_volume_resp = {}
        # find the source volume, optionally constrained to source_svm_name
        for vol in source_volumes:
            if vol['name'] == self.parameters['source_volume_name']:
                vol_found = True
                vol_dest_quote = vol
                source_volume_resp = vol
                if self.parameters.get('source_svm_name') is not None and vol['svmName'] != self.parameters['source_svm_name']:
                    vol_found = False
                if vol_found:
                    break

        if not vol_found:
            self.module.fail_json(changed=False, msg='source volume not found')

        if self.parameters.get('source_svm_name') is None:
            self.parameters['source_svm_name'] = source_volume_resp['svmName']

        # derive the destination SVM when not supplied
        if self.parameters.get('destination_svm_name') is None:
            if dest_we_info.get('svmName') is not None:
                self.parameters['destination_svm_name'] = dest_we_info['svmName']
            else:
                self.parameters['destination_working_environment_name'] = dest_we_info['name']
                # NOTE(review): called with 3 args here vs 4 (incl. provider) elsewhere
                # in the collection -- presumably the provider arg is optional; verify.
                dest_working_env_detail, err = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                      self.headers,
                                                                                                      self.parameters['destination_working_environment_name'])
                if err:
                    self.module.fail_json(changed=False, msg='Error getting destination info %s: %s.' % (err, dest_working_env_detail))
                self.parameters['destination_svm_name'] = dest_working_env_detail['svmName']

        # cloud (non on-prem, non-FSx) destinations need a volume quote first
        if dest_we_info.get('workingEnvironmentType') and dest_we_info['workingEnvironmentType'] != 'ON_PREM'\
                and not dest_we_info['publicId'].startswith('fs-'):
            quote = self.build_quote_request(source_we_info, dest_we_info, vol_dest_quote)
            quote_response = self.quote_volume(quote)
            replication_volume['numOfDisksApprovedToAdd'] = int(quote_response['numOfDisks'])
            if 'iops' in quote:
                replication_volume['iops'] = quote['iops']
            if 'throughput' in quote:
                replication_volume['throughput'] = quote['throughput']
            # advancedMode is enabled only when the user picked a destination aggregate
            if self.parameters.get('destination_aggregate_name') is not None:
                replication_volume['advancedMode'] = True
            else:
                replication_volume['advancedMode'] = False
                replication_volume['destinationAggregateName'] = quote_response['aggregateName']
            if self.parameters.get('provider_volume_type') is None:
                replication_volume['destinationProviderVolumeType'] = source_volume_resp['providerVolumeType']

        if self.parameters.get('capacity_tier') is not None:
            replication_volume['destinationCapacityTier'] = self.parameters['capacity_tier']
        replication_request['sourceWorkingEnvironmentId'] = source_we_info['publicId']
        if dest_we_info['publicId'].startswith('fs-'):
            replication_request['destinationFsxId'] = dest_we_info['publicId']
        else:
            replication_request['destinationWorkingEnvironmentId'] = dest_we_info['publicId']
        replication_volume['sourceVolumeName'] = self.parameters['source_volume_name']
        replication_volume['destinationVolumeName'] = self.parameters['destination_volume_name']
        replication_request['policyName'] = self.parameters['policy']
        replication_request['scheduleName'] = self.parameters['schedule']
        replication_request['maxTransferRate'] = self.parameters['max_transfer_rate']
        replication_volume['sourceSvmName'] = source_volume_resp['svmName']
        replication_volume['destinationSvmName'] = self.parameters['destination_svm_name']
        replication_request['sourceInterclusterLifIps'] = [interclusterlifs_info['interClusterLifs'][0]['address']]
        replication_request['destinationInterclusterLifIps'] = [interclusterlifs_info['peerInterClusterLifs'][0]['address']]

        snapmirror_build_data['replicationRequest'] = replication_request
        snapmirror_build_data['replicationVolume'] = replication_volume

        # pick the endpoint by destination type: FSx, cloud VSA, or on-prem
        if dest_we_info['publicId'].startswith('fs-'):
            api = '/occm/api/replication/fsx'
        elif dest_we_info['workingEnvironmentType'] != 'ON_PREM':
            api = '/occm/api/replication/vsa'
        else:
            api = '/occm/api/replication/onprem'

        response, err, on_cloud_request_id = self.rest_api.send_request("POST", api, None, snapmirror_build_data, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error creating snapmirror relationship %s: %s.' % (err, response))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "snapmirror", "create", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)

    def get_volumes(self, working_environment_detail, name):
        """List volumes named 'name' in a cloud working environment."""
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
        response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?workingEnvironmentId=%s&name=%s" % (
            self.rest_api.api_root_path, working_environment_detail['publicId'], name), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting volume %s: %s.' % (err, response))
        return response

    def quote_volume(self, quote):
        """Submit a destination volume quote and wait for the async task; return the quote response."""
        response, err, on_cloud_request_id = self.rest_api.send_request("POST", '%s/volumes/quote' %
                                                                        self.rest_api.api_root_path, None, quote, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error quoting destination volume %s: %s.' % (err, response))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "quote", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        return response

    def get_volumes_on_prem(self, working_environment_detail, name):
        """List volumes named 'name' in an on-prem working environment."""
        response, err, dummy = self.rest_api.send_request("GET", "/occm/api/onprem/volumes?workingEnvironmentId=%s&name=%s" %
                                                          (working_environment_detail['publicId'], name), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting volume on prem %s: %s.' % (err, response))
        return response

    def get_aggregate_detail(self, working_environment_detail, aggregate_name):
        """Return the aggregate dict matching aggregate_name in the working environment, or None."""
        if working_environment_detail['workingEnvironmentType'] == 'ON_PREM':
            api = "/occm/api/onprem/aggregates?workingEnvironmentId=%s" % working_environment_detail['publicId']
        else:
            self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
            api_root_path = self.rest_api.api_root_path
            # Amazon uses a query parameter, the other providers a path segment
            if working_environment_detail['cloudProviderName'] != "Amazon":
                api = '%s/aggregates/%s'
            else:
                api = '%s/aggregates?workingEnvironmentId=%s'
            api = api % (api_root_path, working_environment_detail['publicId'])
        response, error, dummy = self.rest_api.get(api, header=self.headers)
        if error:
            self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
        for aggr in response:
            if aggr['name'] == aggregate_name:
                return aggr
        return None

    def build_quote_request(self, source_we_info, dest_we_info, vol_dest_quote):
        """Build the quote payload for the destination volume from the source volume's properties."""
        quote = dict()
        quote['size'] = {'size': vol_dest_quote['size']['size'], 'unit': vol_dest_quote['size']['unit']}
        quote['name'] = self.parameters['destination_volume_name']
        quote['snapshotPolicyName'] = vol_dest_quote['snapshotPolicy']
        quote['enableDeduplication'] = vol_dest_quote['deduplication']
        quote['enableThinProvisioning'] = vol_dest_quote['thinProvisioning']
        quote['enableCompression'] = vol_dest_quote['compression']
        quote['verifyNameUniqueness'] = True
        quote['replicationFlow'] = True

        # Use source working environment to get physical properties info of volumes
        aggregate = self.get_aggregate_detail(source_we_info, vol_dest_quote['aggregateName'])
        if aggregate is None:
            self.module.fail_json(changed=False, msg='Error getting aggregate on source volume')
        # All the volumes in one aggregate have the same physical properties
        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
            # gp3/io1/io2 disks carry provisioned IOPS; gp3 also carries throughput
            if aggregate['providerVolumes'][0]['diskType'] == 'gp3' or aggregate['providerVolumes'][0]['diskType'] == 'io1'\
                    or aggregate['providerVolumes'][0]['diskType'] == 'io2':
                quote['iops'] = aggregate['providerVolumes'][0]['iops']
            if aggregate['providerVolumes'][0]['diskType'] == 'gp3':
                quote['throughput'] = aggregate['providerVolumes'][0]['throughput']
        quote['workingEnvironmentId'] = dest_we_info['publicId']
        quote['svmName'] = self.parameters['destination_svm_name']
        if self.parameters.get('capacity_tier') is not None:
            quote['capacityTier'] = self.parameters['capacity_tier']

        # fall back to the source volume's provider type when none was requested
        if self.parameters.get('provider_volume_type') is None:
            quote['providerVolumeType'] = vol_dest_quote['providerVolumeType']
        else:
            quote['providerVolumeType'] = self.parameters['provider_volume_type']

        return quote

    def delete_snapmirror(self, sm_detail):
        """Delete the SnapMirror relationship described by sm_detail (from get_snapmirror)."""
        api_delete = '/occm/api/replication/%s/%s/%s' %\
            (sm_detail['destination_working_environment_id'], sm_detail['destination_svm_name'], self.parameters['destination_volume_name'])
        dummy, err, dummy_second = self.rest_api.send_request("DELETE", api_delete, None, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error deleting snapmirror relationship %s: %s.' % (err, dummy))

    def get_interclusterlifs(self, source_we_id, dest_we_id):
        """Return intercluster LIF info for the source/destination working environment pair."""
        api_get = '/occm/api/replication/intercluster-lifs?peerWorkingEnvironmentId=%s&workingEnvironmentId=%s' % (dest_we_id, source_we_id)
        response, err, dummy_second = self.rest_api.send_request("GET", api_get, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting interclusterlifs %s: %s.' % (err, response))
        return response

    def apply(self):
        """Create or delete the relationship to match the requested state, then exit."""
        current = self.get_snapmirror()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_snapmirror()
            elif cd_action == 'delete':
                self.delete_snapmirror(current)
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Entry point: build the snapmirror module object and apply the requested state."""
    snapmirror = NetAppCloudmanagerSnapmirror()
    snapmirror.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py
new file mode 100644
index 000000000..62c898c57
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py
@@ -0,0 +1,660 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_volume
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_volume
+short_description: NetApp Cloud Manager volume
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, Modify or Delete volume on Cloud Manager.
+
+options:
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the volume.
+ required: true
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the volume will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the volume will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ size:
+ description:
+ - The size of the volume.
+ type: float
+
+ size_unit:
+ description:
+ - The size unit of volume.
+ choices: ['GB']
+ default: 'GB'
+ type: str
+
+ snapshot_policy_name:
+ description:
+ - The snapshot policy name.
+ type: str
+
+ provider_volume_type:
+ description:
+ - The underlying cloud provider volume type.
+ - For AWS, the allowed values are ["gp3", "gp2", "io1", "st1", "sc1"].
+ - For Azure, the allowed values are ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
+ - For GCP, the allowed values are ['pd-balanced','pd-ssd','pd-standard'].
+ type: str
+
+ enable_deduplication:
+ description:
+ - Enabling deduplication.
+ - Default to true if not specified.
+ type: bool
+
+ enable_compression:
+ description:
+ - Enabling compression.
+ - Default to true if not specified.
+ type: bool
+
+ enable_thin_provisioning:
+ description:
+ - Enabling thin provisioning.
+ - Default to true if not specified.
+ type: bool
+
+ svm_name:
+ description:
+ - The name of the SVM. The default SVM name is used, if a name is not provided.
+ type: str
+
+ aggregate_name:
+ description:
+ - The aggregate in which the volume will be created. If not provided, Cloud Manager chooses the best aggregate.
+ type: str
+
+ capacity_tier:
+ description:
+ - The volume's capacity tier for tiering cold data to object storage.
+ - The default values for each cloud provider are as follows. Amazon as 'S3', Azure as 'Blob', GCP as 'cloudStorage'.
+ - If 'NONE', the capacity tier will not be set on volume creation.
+ choices: ['NONE', 'S3', 'Blob', 'cloudStorage']
+ type: str
+
+ tiering_policy:
+ description:
+ - The tiering policy.
+ choices: ['none', 'snapshot_only', 'auto', 'all']
+ type: str
+
+ export_policy_type:
+ description:
+ - The export policy type (NFS protocol parameters).
+ type: str
+
+ export_policy_ip:
+ description:
+ - Custom export policy list of IPs (NFS protocol parameters).
+ type: list
+ elements: str
+
+ export_policy_nfs_version:
+ description:
+ - Export policy protocol (NFS protocol parameters).
+ type: list
+ elements: str
+
+ iops:
+ description:
+ - Provisioned IOPS. Needed only when provider_volume_type is "io1".
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+ volume_protocol:
+ description:
+ - The protocol for the volume. This affects the provided parameters.
+ choices: ['nfs', 'cifs', 'iscsi']
+ type: str
+ default: 'nfs'
+
+ share_name:
+ description:
+ - Share name (CIFS protocol parameters).
+ type: str
+
+ permission:
+ description:
+ - CIFS share permission type (CIFS protocol parameters).
+ type: str
+
+ users:
+ description:
+ - List of users with the permission (CIFS protocol parameters).
+ type: list
+ elements: str
+
+ igroups:
+ description:
+ - List of igroups (iSCSI protocol parameters).
+ type: list
+ elements: str
+
+ os_name:
+ description:
+ - Operating system (iSCSI protocol parameters).
+ type: str
+
+ tenant_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
+ type: str
+ version_added: 21.20.0
+
+ initiators:
+ description:
+ - Set of attributes of Initiators (iSCSI protocol parameters).
+ type: list
+ elements: dict
+ suboptions:
+ iqn:
+ description: The initiator node name.
+ required: true
+ type: str
+ alias:
+ description: The alias which associates with the node.
+ required: true
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create nfs volume with working_environment_name
+ netapp.cloudmanager.na_cloudmanager_volume:
+ state: present
+ name: test_vol
+ size: 15
+ size_unit: GB
+ working_environment_name: working_environment_1
+ client_id: client_id
+ refresh_token: refresh_token
+ svm_name: svm_1
+ snapshot_policy_name: default
+ export_policy_type: custom
+ export_policy_ip: ["10.0.0.1/16"]
+ export_policy_nfs_version: ["nfs3","nfs4"]
+
+- name: Delete volume
+ netapp.cloudmanager.na_cloudmanager_volume:
+ state: absent
+ name: test_vol
+ working_environment_name: working_environment_1
+ client_id: client_id
+ refresh_token: refresh_token
+ svm_name: svm_1
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppCloudmanagerVolume(object):
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+ check parameters and ensure request module is installed
+ """
+ self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ working_environment_id=dict(required=False, type='str'),
+ working_environment_name=dict(required=False, type='str'),
+ client_id=dict(required=True, type='str'),
+ size=dict(required=False, type='float'),
+ size_unit=dict(required=False, choices=['GB'], default='GB'),
+ snapshot_policy_name=dict(required=False, type='str'),
+ provider_volume_type=dict(required=False, type='str'),
+ enable_deduplication=dict(required=False, type='bool'),
+ enable_thin_provisioning=dict(required=False, type='bool'),
+ enable_compression=dict(required=False, type='bool'),
+ svm_name=dict(required=False, type='str'),
+ aggregate_name=dict(required=False, type='str'),
+ capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
+ tiering_policy=dict(required=False, type='str', choices=['none', 'snapshot_only', 'auto', 'all']),
+ export_policy_type=dict(required=False, type='str'),
+ export_policy_ip=dict(required=False, type='list', elements='str'),
+ export_policy_nfs_version=dict(required=False, type='list', elements='str'),
+ iops=dict(required=False, type='int'),
+ throughput=dict(required=False, type='int'),
+ volume_protocol=dict(required=False, type='str', choices=['nfs', 'cifs', 'iscsi'], default='nfs'),
+ share_name=dict(required=False, type='str'),
+ permission=dict(required=False, type='str'),
+ users=dict(required=False, type='list', elements='str'),
+ igroups=dict(required=False, type='list', elements='str'),
+ os_name=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='str'),
+ initiators=dict(required=False, type='list', elements='dict', options=dict(
+ alias=dict(required=True, type='str'),
+ iqn=dict(required=True, type='str'),)),
+
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_one_of=[
+ ['refresh_token', 'sa_client_id'],
+ ['working_environment_name', 'working_environment_id'],
+ ],
+ required_together=[['sa_client_id', 'sa_secret_key']],
+ required_if=[
+ ['provider_volume_type', 'gp3', ['iops', 'throughput']],
+ ['provider_volume_type', 'io1', ['iops']],
+ ['capacity_tier', 'S3', ['tiering_policy']],
+ ],
+ # enable_thin_provisioning reflects storage efficiency.
+ required_by={
+ 'capacity_tier': ('tiering_policy', 'enable_thin_provisioning'),
+ },
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Calling generic rest_api class
+ self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
+ self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
+ self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+ self.headers = {
+ 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+ }
+ if self.rest_api.simulator:
+ self.headers.update({'x-simulator': 'true'})
+ if self.parameters.get('tenant_id'):
+ working_environment_detail, error = self.na_helper.get_aws_fsx_details(self.rest_api, self.headers, self.parameters['working_environment_name'])
+ elif self.parameters.get('working_environment_id'):
+ working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
+ else:
+ working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
+ self.headers,
+ self.parameters['working_environment_name'])
+ if working_environment_detail is None:
+ self.module.fail_json(msg="Error: Cannot find working environment, if it is an AWS FSxN, please provide tenant_id: %s" % str(error))
+ self.parameters['working_environment_id'] = working_environment_detail['publicId']\
+ if working_environment_detail.get('publicId') else working_environment_detail['id']
+ self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
+ self.is_fsx = self.parameters['working_environment_id'].startswith('fs-')
+
+ if self.parameters.get('svm_name') is None:
+ fsx_path = ''
+ if self.is_fsx:
+ fsx_path = '/svms'
+ response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s%s" % (
+ self.rest_api.api_root_path, self.parameters['working_environment_id'], fsx_path), None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting svm: %s, %s" % (str(err), str(response)))
+ if self.is_fsx:
+ self.parameters['svm_name'] = response[0]['name']
+ else:
+ self.parameters['svm_name'] = response['svmName']
+
+ if self.parameters['volume_protocol'] == 'nfs':
+ extra_options = []
+ for option in ['share_name', 'permission', 'users', 'igroups', 'os_name', 'initiator']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is nfs: "
+ " %s" % extra_options)
+ elif self.parameters['volume_protocol'] == 'cifs':
+ extra_options = []
+ for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'igroups', 'os_name', 'initiator']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is cifs: "
+ "%s" % extra_options)
+ else:
+ extra_options = []
+ for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'share_name', 'permission', 'users']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is iscsi: "
+ "%s" % extra_options)
+
+ if self.parameters.get('igroups'):
+ current_igroups = []
+ for igroup in self.parameters['igroups']:
+ current = self.get_igroup(igroup)
+ current_igroups.append(current)
+ if any(isinstance(x, dict) for x in current_igroups) and None in current_igroups:
+ self.module.fail_json(changed=False, msg="Error: can not specify existing"
+ "igroup and new igroup together.")
+ if len(current_igroups) > 1 and None in current_igroups:
+ self.module.fail_json(changed=False, msg="Error: can not create more than one igroups.")
+ if current_igroups[0] is None:
+ if self.parameters.get('initiators') is None:
+ self.module.fail_json(changed=False, msg="Error: initiator is required when creating new igroup.")
+
+ if self.parameters.get('users'):
+ # When creating volume, 'Everyone' must have upper case E, 'everyone' will not work.
+ # When modifying volume, 'everyone' is fine.
+ new_users = []
+ for user in self.parameters['users']:
+ if user.lower() == 'everyone':
+ new_users.append('Everyone')
+ else:
+ new_users.append(user)
+ self.parameters['users'] = new_users
+
+ def get_volume(self):
+ if self.is_fsx:
+ query_param = 'fileSystemId'
+ else:
+ query_param = 'workingEnvironmentId'
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?%s=%s" % (
+ self.rest_api.api_root_path, query_param, self.parameters['working_environment_id']), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting volume: %s, %s" % (str(err), str(response)))
+ target_vol = dict()
+ if response is None:
+ return None
+ for volume in response:
+ if volume['name'] == self.parameters['name']:
+ target_vol['name'] = volume['name']
+ target_vol['enable_deduplication'] = volume['deduplication']
+ target_vol['enable_thin_provisioning'] = volume['thinProvisioning']
+ target_vol['enable_compression'] = volume['compression']
+ if self.parameters.get('size'):
+ target_vol['size'] = volume['size']['size']
+ if self.parameters.get('size_unit'):
+ target_vol['size_unit'] = volume['size']['unit']
+ if self.parameters.get('export_policy_nfs_version') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_nfs_version'] = volume['exportPolicyInfo']['nfsVersion']
+ if self.parameters.get('export_policy_ip') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_ip'] = volume['exportPolicyInfo']['ips']
+ if self.parameters.get('export_policy_type') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_type'] = volume['exportPolicyInfo']['policyType']
+ if self.parameters.get('snapshot_policy'):
+ target_vol['snapshot_policy'] = volume['snapshotPolicy']
+ if self.parameters.get('provider_volume_type'):
+ target_vol['provider_volume_type'] = volume['providerVolumeType']
+ if self.parameters.get('capacity_tier') and self.parameters.get('capacity_tier') != 'NONE':
+ target_vol['capacity_tier'] = volume['capacityTier']
+ if self.parameters.get('tiering_policy'):
+ target_vol['tiering_policy'] = volume['tieringPolicy']
+ if self.parameters.get('share_name') and volume.get('shareInfo'):
+ target_vol['share_name'] = volume['shareInfo'][0]['shareName']
+ if self.parameters.get('users') and volume.get('shareInfo'):
+ if len(volume['shareInfo'][0]['accessControlList']) > 0:
+ target_vol['users'] = volume['shareInfo'][0]['accessControlList'][0]['users']
+ else:
+ target_vol['users'] = []
+ if self.parameters.get('users') and volume.get('shareInfo'):
+ if len(volume['shareInfo'][0]['accessControlList']) > 0:
+ target_vol['permission'] = volume['shareInfo'][0]['accessControlList'][0]['permission']
+ else:
+ target_vol['permission'] = []
+ if self.parameters.get('os_name') and volume.get('iscsiInfo'):
+ target_vol['os_name'] = volume['iscsiInfo']['osName']
+ if self.parameters.get('igroups') and volume.get('iscsiInfo'):
+ target_vol['igroups'] = volume['iscsiInfo']['igroups']
+ return target_vol
+ return None
+
    def create_volume(self):
        """
        Create the volume.

        For non-FSx working environments the request body is first sent to the
        quote endpoint, and the aggregate information from the quote response is
        folded back into the creation request. The call then waits for the
        Cloud Manager active task to complete.
        """
        # Options translated to API casing by convert_module_args_to_api,
        # except these, which are either not part of the body or need special
        # handling below.
        exclude_list = ['client_id', 'size_unit', 'export_policy_name', 'export_policy_type', 'export_policy_ip',
                        'export_policy_nfs_version', 'capacity_tier']
        quote = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
        quote['verifyNameUniqueness'] = True  # Always hard coded to true.
        quote['unit'] = self.parameters['size_unit']
        quote['size'] = {'size': self.parameters['size'], 'unit': self.parameters['size_unit']}
        # When no aggregate is named, let Cloud Manager create one if needed.
        create_aggregate_if_not_exists = True
        if self.parameters.get('aggregate_name'):
            quote['aggregateName'] = self.parameters['aggregate_name']
            create_aggregate_if_not_exists = False

        # 'NONE' is a sentinel meaning: do not set a capacity tier at all.
        if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
            quote['capacityTier'] = self.parameters['capacity_tier']

        # Protocol-specific body section: exportPolicyInfo (nfs),
        # iscsiInfo (iscsi) or shareInfo (cifs).
        if self.parameters['volume_protocol'] == 'nfs':
            quote['exportPolicyInfo'] = dict()
            if self.parameters.get('export_policy_type'):
                quote['exportPolicyInfo']['policyType'] = self.parameters['export_policy_type']
            if self.parameters.get('export_policy_ip'):
                quote['exportPolicyInfo']['ips'] = self.parameters['export_policy_ip']
            if self.parameters.get('export_policy_nfs_version'):
                quote['exportPolicyInfo']['nfsVersion'] = self.parameters['export_policy_nfs_version']
        elif self.parameters['volume_protocol'] == 'iscsi':
            iscsi_info = self.iscsi_volume_helper()
            quote.update(iscsi_info)
        else:
            quote['shareInfo'] = dict()
            quote['shareInfo']['accessControl'] = dict()
            quote['shareInfo']['accessControl']['users'] = self.parameters['users']
            if self.parameters.get('permission'):
                quote['shareInfo']['accessControl']['permission'] = self.parameters['permission']
            if self.parameters.get('share_name'):
                quote['shareInfo']['shareName'] = self.parameters['share_name']
        if not self.is_fsx:
            # Quote the volume first; the response tells us which aggregate to
            # use and whether a new one will be created.
            response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/quote" % self.rest_api.api_root_path,
                                                              None, quote, header=self.headers)
            if err is not None:
                self.module.fail_json(changed=False, msg="Error: unexpected response on quoting volume: %s, %s" % (str(err), str(response)))
            quote['newAggregate'] = response['newAggregate']
            quote['aggregateName'] = response['aggregateName']
            quote['maxNumOfDisksApprovedToAdd'] = response['numOfDisks']
        else:
            # FSx volumes are created against the file system id; no quote step.
            quote['fileSystemId'] = self.parameters['working_environment_id']
        if self.parameters.get('enable_deduplication'):
            quote['deduplication'] = self.parameters.get('enable_deduplication')
        if self.parameters.get('enable_thin_provisioning'):
            quote['thinProvisioning'] = self.parameters.get('enable_thin_provisioning')
        if self.parameters.get('enable_compression'):
            quote['compression'] = self.parameters.get('enable_compression')
        if self.parameters.get('snapshot_policy_name'):
            quote['snapshotPolicy'] = self.parameters['snapshot_policy_name']
        if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
            quote['capacityTier'] = self.parameters['capacity_tier']
        if self.parameters.get('tiering_policy'):
            quote['tieringPolicy'] = self.parameters['tiering_policy']
        if self.parameters.get('provider_volume_type'):
            quote['providerVolumeType'] = self.parameters['provider_volume_type']
        if self.parameters.get('iops'):
            quote['iops'] = self.parameters.get('iops')
        if self.parameters.get('throughput'):
            quote['throughput'] = self.parameters.get('throughput')
        # Creation is asynchronous: poll the active task until done (20 x 5s).
        response, err, on_cloud_request_id = self.rest_api.send_request("POST", "%s/volumes?createAggregateIfNotFound=%s" % (
            self.rest_api.api_root_path, create_aggregate_if_not_exists), None, quote, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected on creating volume: %s, %s" % (str(err), str(response)))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "create", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response wait_on_completion for creating volume: %s, %s" % (str(err), str(response)))
+
    def modify_volume(self, modify):
        """
        Modify the existing volume via PUT.

        :param modify: dict of attributes that differ from the current volume
                       (as computed by get_modified_attributes in apply()).
        """
        vol = dict()
        # The full protocol sub-structure is rebuilt from parameters and sent
        # as a whole, not only the keys listed in 'modify'.
        if self.parameters['volume_protocol'] == 'nfs':
            export_policy_info = dict()
            if self.parameters.get('export_policy_type'):
                export_policy_info['policyType'] = self.parameters['export_policy_type']
            if self.parameters.get('export_policy_ip'):
                export_policy_info['ips'] = self.parameters['export_policy_ip']
            if self.parameters.get('export_policy_nfs_version'):
                export_policy_info['nfsVersion'] = self.parameters['export_policy_nfs_version']
            vol['exportPolicyInfo'] = export_policy_info
        elif self.parameters['volume_protocol'] == 'cifs':
            vol['shareInfo'] = dict()
            vol['shareInfo']['accessControlList'] = []
            vol['shareInfo']['accessControlList'].append(dict())
            if self.parameters.get('users'):
                vol['shareInfo']['accessControlList'][0]['users'] = self.parameters['users']
            if self.parameters.get('permission'):
                vol['shareInfo']['accessControlList'][0]['permission'] = self.parameters['permission']
            if self.parameters.get('share_name'):
                vol['shareInfo']['shareName'] = self.parameters['share_name']
        # Snapshot policy and tiering policy are only sent when they changed.
        if modify.get('snapshot_policy_name'):
            vol['snapshotPolicyName'] = self.parameters.get('snapshot_policy_name')
        if modify.get('tiering_policy'):
            vol['tieringPolicy'] = self.parameters.get('tiering_policy')
        response, err, dummy = self.rest_api.send_request("PUT", "%s/volumes/%s/%s/%s" % (
            self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
            self.parameters['name']), None, vol, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on modifying volume: %s, %s" % (str(err), str(response)))
+
+ def delete_volume(self):
+ response, err, dummy = self.rest_api.send_request("DELETE", "%s/volumes/%s/%s/%s" % (
+ self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
+ self.parameters['name']), None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on deleting volume: %s, %s" % (str(err), str(response)))
+
+ def get_initiator(self, alias_name):
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/initiator" % (
+ self.rest_api.api_root_path), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting initiator: %s, %s" % (str(err), str(response)))
+ result = dict()
+ if response is None:
+ return None
+ for initiator in response:
+ if initiator.get('aliasName') and initiator.get('aliasName') == alias_name:
+ result['alias'] = initiator.get('aliasName')
+ result['iqn'] = initiator.get('iqn')
+ return result
+ return None
+
+ def create_initiator(self, initiator):
+ ini = self.na_helper.convert_module_args_to_api(initiator)
+ response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/initiator" % (
+ self.rest_api.api_root_path), None, ini, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on creating initiator: %s, %s" % (str(err), str(response)))
+
+ def get_igroup(self, igroup_name):
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/igroups/%s/%s" % (
+ self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name']),
+ None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting igroup: %s, %s" % (str(err), str(response)))
+ result = dict()
+ if response is None:
+ return None
+ for igroup in response:
+ if igroup['igroupName'] == igroup_name:
+ result['igroup_name'] = igroup['igroupName']
+ result['os_type'] = igroup['osType']
+ result['portset_name'] = igroup['portsetName']
+ result['igroup_type'] = igroup['igroupType']
+ result['initiators'] = igroup['initiators']
+ return result
+ return None
+
    def iscsi_volume_helper(self):
        """
        Build the iscsiInfo section of a volume creation request.

        Existing igroups are referenced by name; a missing igroup results in an
        igroupCreationRequest, creating any of its initiators that do not exist
        yet on the SVM.
        """
        quote = dict()
        quote['iscsiInfo'] = dict()
        if self.parameters.get('igroups'):
            current_igroups = []
            for igroup in self.parameters['igroups']:
                current = self.get_igroup(igroup)
                current_igroups.append(current)
            for igroup in current_igroups:
                if igroup is None:
                    # New igroup: __init__ validation guarantees at most one
                    # missing igroup and that 'initiators' was provided.
                    quote['iscsiInfo']['igroupCreationRequest'] = dict()
                    quote['iscsiInfo']['igroupCreationRequest']['igroupName'] = self.parameters['igroups'][0]
                    iqn_list = []
                    for initiator in self.parameters['initiators']:
                        if initiator.get('iqn'):
                            iqn_list.append(initiator['iqn'])
                            current_initiator = self.get_initiator(initiator['alias'])
                            if current_initiator is None:
                                # Initiator not registered yet; create it first.
                                initiator_request = dict()
                                if initiator.get('alias'):
                                    initiator_request['aliasName'] = initiator['alias']
                                if initiator.get('iqn'):
                                    initiator_request['iqn'] = initiator['iqn']
                                self.create_initiator(initiator_request)
                    quote['iscsiInfo']['igroupCreationRequest']['initiators'] = iqn_list
                    quote['iscsiInfo']['osName'] = self.parameters['os_name']

                else:
                    # All igroups exist: reference them by name.
                    quote['iscsiInfo']['igroups'] = self.parameters['igroups']
                    quote['iscsiInfo']['osName'] = self.parameters['os_name']
        return quote
+
+ def apply(self):
+ current = self.get_volume()
+ cd_action, modify = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ unmodifiable = []
+ for attr in modify:
+ if attr not in ['export_policy_ip', 'export_policy_nfs_version', 'snapshot_policy_name', 'users',
+ 'permission', 'tiering_policy', 'snapshot_policy_name']:
+ unmodifiable.append(attr)
+ if len(unmodifiable) > 0:
+ self.module.fail_json(changed=False, msg="%s cannot be modified." % str(unmodifiable))
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_volume()
+ elif cd_action == 'delete':
+ self.delete_volume()
+ elif modify:
+ self.modify_volume(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    """Module entry point: run the volume state machine."""
    NetAppCloudmanagerVolume().apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/requirements.txt b/ansible_collections/netapp/cloudmanager/requirements.txt
new file mode 100644
index 000000000..169c9049e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/requirements.txt
@@ -0,0 +1,10 @@
+requests
+boto3
+botocore
+azure-mgmt-compute>=20.0.0
+azure-mgmt-core>=1.2.2
+azure-mgmt-network>=18.0.0
+azure-mgmt-resource>=16.1.0
+azure-mgmt-storage>=17.1.0
+msrestazure
+azure-common \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/__init__.py
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py
new file mode 100644
index 000000000..f60ee6782
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
try:
    # Python 2: the builtins module is spelled '__builtin__'.
    import __builtin__  # noqa: F401
    BUILTINS = '__builtin__'
except ImportError:
    # Python 3 renamed it to 'builtins'.
    BUILTINS = 'builtins'
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py
new file mode 100644
index 000000000..0972cd2e8
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
try:
    # Prefer the stdlib implementation (Python 3). Wildcard import is
    # intentional: this shim re-exports all of mock's symbols.
    # pylint: disable=wildcard-import,unused-wildcard-import
    from unittest.mock import *
except ImportError:
    # Fall back to the pypi 'mock' backport on Python 2.
    # pylint: disable=wildcard-import,unused-wildcard-import
    try:
        from mock import *
    except ImportError:
        print('You need the mock library installed on python2.x to run tests')
+
+
# Prior to 3.4.4, mock_open cannot handle binary read_data: provide a
# backported implementation on those interpreters only. On other versions
# the names below are simply the ones wildcard-imported above.
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
    file_spec = None

    def _iterate_read_data(read_data):
        # Helper for mock_open:
        # Retrieve lines from read_data via a generator so that separate calls to
        # readline, read, and readlines are properly interleaved
        sep = b'\n' if isinstance(read_data, bytes) else '\n'
        data_as_list = [l + sep for l in read_data.split(sep)]

        if data_as_list[-1] == sep:
            # If the last line ended in a newline, the list comprehension will have an
            # extra entry that's just a newline. Remove this.
            data_as_list = data_as_list[:-1]
        else:
            # If there wasn't an extra newline by itself, then the file being
            # emulated doesn't have a newline to end the last line remove the
            # newline that our naive format() added
            data_as_list[-1] = data_as_list[-1][:-1]

        for line in data_as_list:
            yield line

    def mock_open(mock=None, read_data=''):
        """
        A helper function to create a mock to replace the use of `open`. It works
        for `open` called directly or used as a context manager.

        The `mock` argument is the mock object to configure. If `None` (the
        default) then a `MagicMock` will be created for you, with the API limited
        to methods or attributes available on standard file handles.

        `read_data` is a string for the `read`, `readline`, and `readlines`
        methods of the file handle to return. This is an empty string by default.
        """
        def _readlines_side_effect(*args, **kwargs):
            if handle.readlines.return_value is not None:
                return handle.readlines.return_value
            return list(_data)

        def _read_side_effect(*args, **kwargs):
            if handle.read.return_value is not None:
                return handle.read.return_value
            return type(read_data)().join(_data)

        def _readline_side_effect():
            if handle.readline.return_value is not None:
                while True:
                    yield handle.readline.return_value
            for line in _data:
                yield line

        # Build the spec lazily from the io classes so both text and binary
        # handle attributes are covered.
        global file_spec
        if file_spec is None:
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))

        if mock is None:
            mock = MagicMock(name='open', spec=open)

        handle = MagicMock(spec=file_spec)
        handle.__enter__.return_value = handle

        _data = _iterate_read_data(read_data)

        # return_value of None means "serve from _data"; an explicitly set
        # return_value (by the test) wins in the side-effect functions above.
        handle.write.return_value = None
        handle.read.return_value = None
        handle.readline.return_value = None
        handle.readlines.return_value = None

        handle.read.side_effect = _read_side_effect
        handle.readline.side_effect = _readline_side_effect()
        handle.readlines.side_effect = _readlines_side_effect

        mock.return_value = handle
        return mock
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/compat/unittest.py b/ansible_collections/netapp/cloudmanager/tests/unit/compat/unittest.py
new file mode 100644
index 000000000..73a20cf8c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/compat/unittest.py
@@ -0,0 +1,44 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+import pytest
+
+# Allow wildcard import because we really do want to import all of
+# unittest's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+
+ class TestCase:
+ """ skip everything """
+ pytestmark = pytest.mark.skip('Skipping Unit Tests on 2.6 as unittest2 may not be available')
+else:
+ from unittest import *
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py
new file mode 100644
index 000000000..959cbaef5
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp.py
@@ -0,0 +1,506 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" unit tests for module_utils netapp.py
+
+ Provides wrappers for cloudmanager REST APIs
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# import copy # for deepcopy
+import json
+import pytest
+import sys
+try:
+ import requests.exceptions
+ HAS_REQUESTS_EXC = True
+except ImportError:
+ HAS_REQUESTS_EXC = False
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+
+if (not netapp_utils.HAS_REQUESTS or not HAS_REQUESTS_EXC) and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockModule():
+ ''' rough mock for an Ansible module class '''
+ def __init__(self):
+ self.params = {}
+
+ def fail_json(self, *args, **kwargs): # pylint: disable=unused-argument
+ """function to simulate fail_json: package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class mockResponse:
+ def __init__(self, json_data, status_code, headers=None, raise_action=None):
+ self.json_data = json_data
+ self.status_code = status_code
+ self.content = json_data
+ self.headers = headers or {}
+ self.raise_action = raise_action
+
+ def raise_for_status(self):
+ pass
+
+ def json(self):
+ if self.raise_action == 'bad_json':
+ raise ValueError(self.raise_action)
+ return self.json_data
+
+
+def create_module(args):
+ argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ set_module_args(args)
+ module = basic.AnsibleModule(argument_spec)
+ module.fail_json = fail_json
+ return module
+
+
+def create_restapi_object(args):
+ module = create_module(args)
+ return netapp_utils.CloudManagerRestAPI(module)
+
+
+def mock_args(feature_flags=None, client_id=None):
+ args = {
+ 'refresh_token': 'ABCDEFGS'
+ }
+ if feature_flags is not None:
+ args['feature_flags'] = feature_flags
+ if client_id is not None:
+ args['client_id'] = client_id
+ return args
+
+
+TOKEN_DICT = {
+ 'access_token': 'access_token',
+ 'token_type': 'token_type'
+}
+
+
+def test_missing_params():
+ module = MockModule()
+ with pytest.raises(KeyError) as exc:
+ netapp_utils.CloudManagerRestAPI(module)
+ assert exc.match('refresh_token')
+
+
+@patch('requests.request')
+def test_get_token_refresh(mock_request):
+ ''' successfully get token using refresh token '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ ]
+ # get_token is called when the object is created
+ rest_api = create_restapi_object(mock_args())
+ print(rest_api.token_type, rest_api.token)
+ assert rest_api.token_type == TOKEN_DICT['token_type']
+ assert rest_api.token == TOKEN_DICT['access_token']
+
+
+@patch('requests.request')
+def test_negative_get_token_none(mock_request):
+ ''' missing refresh token and Service Account '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ ]
+ # get_token is called when the object is created
+ args = dict(mock_args())
+ args.pop('refresh_token')
+ # get_token is called when the object is created
+ with pytest.raises(AnsibleFailJson) as exc:
+ rest_api = create_restapi_object(args)
+ msg = 'Missing refresh_token or sa_client_id and sa_secret_key'
+ assert msg in exc.value.args[0]['msg']
+
+
+@patch('requests.request')
+def test_get_token_sa(mock_request):
+ ''' successfully get token using Service Account '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ ]
+ # get_token is called when the object is created
+ args = dict(mock_args())
+ args.pop('refresh_token')
+ args['sa_client_id'] = '123'
+ args['sa_secret_key'] = 'a1b2c3'
+ rest_api = create_restapi_object(args)
+ print(rest_api.token_type, rest_api.token)
+ assert rest_api.token_type == TOKEN_DICT['token_type']
+ assert rest_api.token == TOKEN_DICT['access_token']
+
+
+@patch('requests.request')
+def test_negative_get_token(mock_request):
+ ''' error on OAUTH request '''
+ mock_request.side_effect = [
+ mockResponse(json_data={'message': 'error message'}, status_code=206)
+ ]
+ # get_token is called when the object is created
+ with pytest.raises(AnsibleFailJson) as exc:
+ rest_api = create_restapi_object(mock_args())
+ msg = 'Error acquiring token: error message'
+ assert msg in exc.value.args[0]['msg']
+
+
+@patch('requests.request')
+def test_get_json(mock_request):
+ ''' get with no data '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'})
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message == {'key': 'value'}
+ assert error is None
+ assert ocr == 'OCR_id'
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_get_retries(mock_request, dont_sleep):
+ ''' get with no data '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ requests.exceptions.ConnectionError('Max retries exceeded with url:'),
+ requests.exceptions.ConnectionError('Max retries exceeded with url:'),
+ mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'})
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message == {'key': 'value'}
+ assert error is None
+ assert ocr == 'OCR_id'
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_get_retries_exceeded(mock_request, dont_sleep):
+ ''' get with no data '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ requests.exceptions.ConnectionError('Max retries exceeded with url:'),
+ requests.exceptions.ConnectionError('Max retries exceeded with url:'),
+ requests.exceptions.ConnectionError('Max retries exceeded with url:'),
+ mockResponse(json_data={'key': 'value'}, status_code=200, headers={'OnCloud-Request-Id': 'OCR_id'})
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert 'Max retries exceeded with url:' in error
+
+
+@patch('requests.request')
+def test_empty_get_sent_bad_json(mock_request):
+ ''' get with invalid json '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data='anything', status_code=200, raise_action='bad_json')
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message is None
+ assert error is None
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_empty_get_sent_203(mock_request):
+ ''' get with no data and 203 status code '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={}, status_code=203)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message == {}
+ assert error is None
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_negative_get_sent_203(mock_request):
+ ''' get with 203 status code - not sure we should error out here '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'message': 'error message'}, status_code=203)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message == {'message': 'error message'}
+ assert error == 'error message'
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_negative_get_sent_300(mock_request):
+ ''' get with 300 status code - 300 indicates an error '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={}, status_code=300)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message == {}
+ assert error == '300'
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_negative_get_raise_http_exc(mock_request):
+ ''' get with HTTPError exception '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ requests.exceptions.HTTPError('some exception')
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message is None
+ assert error == 'some exception'
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_negative_get_raise_conn_exc(mock_request):
+ ''' get with ConnectionError exception '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ requests.exceptions.ConnectionError('some exception')
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message is None
+ assert error == 'some exception'
+ assert ocr is None
+
+
+@patch('requests.request')
+def test_negative_get_raise_oserror_exc(mock_request):
+ ''' get with a general exception '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception')
+ ]
+ rest_api = create_restapi_object(mock_args())
+ message, error, ocr = rest_api.get('api', None)
+ print(message, error, ocr)
+ assert message is None
+ assert error == 'some exception'
+ assert ocr is None
+
+
+def test_has_feature_success_default():
+ ''' existing feature_flag with default '''
+ flag = 'show_modified'
+ module = create_module(mock_args())
+ value = netapp_utils.has_feature(module, flag)
+ assert value
+
+
+def test_has_feature_success_user_true():
+ ''' existing feature_flag with value set to True '''
+ flag = 'user_deprecation_warning'
+ args = dict(mock_args({flag: True}))
+ module = create_module(args)
+ value = netapp_utils.has_feature(module, flag)
+ assert value
+
+
+def test_has_feature_success_user_false():
+ ''' existing feature_flag with value set to False '''
+ flag = 'user_deprecation_warning'
+ args = dict(mock_args({flag: False}))
+ print(args)
+ module = create_module(args)
+ value = netapp_utils.has_feature(module, flag)
+ assert not value
+
+
+def test_has_feature_invalid_key():
+ ''' existing feature_flag with unknown key '''
+ flag = 'deprecation_warning_bad_key'
+ module = create_module(mock_args())
+ with pytest.raises(AnsibleFailJson) as exc:
+ netapp_utils.has_feature(module, flag)
+ msg = 'Internal error: unexpected feature flag: %s' % flag
+ assert exc.value.args[0]['msg'] == msg
+
+
+def test_has_feature_invalid_bool():
+ ''' existing feature_flag with non boolean value '''
+ flag = 'deprecation_warning_key'
+ module = create_module(mock_args({flag: 'str'}))
+ with pytest.raises(AnsibleFailJson) as exc:
+ netapp_utils.has_feature(module, flag)
+ msg = "Error: expected bool type for feature flag"
+ assert msg in exc.value.args[0]['msg']
+
+
+STATUS_DICT = {
+ 'status': 1,
+ 'error': None
+}
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_check_task_status(mock_request, mock_sleep):
+ ''' successful get with 2 retries '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ mockResponse(json_data=STATUS_DICT, status_code=200)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ status, error_msg, error = rest_api.check_task_status('api')
+ assert status == STATUS_DICT['status']
+ assert error_msg == STATUS_DICT['error']
+ assert error is None
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_negative_check_task_status(mock_request, mock_sleep):
+ ''' get with 4 failed retries '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ requests.exceptions.HTTPError('some exception'),
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ status, error_msg, error = rest_api.check_task_status('api')
+ assert status == 0
+ assert error_msg == ''
+ assert error == 'some exception'
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_wait_on_completion(mock_request, mock_sleep):
+ ''' successful get with 2 retries '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ mockResponse(json_data=STATUS_DICT, status_code=200)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1)
+ assert error is None
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_negative_wait_on_completion_failure(mock_request, mock_sleep):
+ ''' successful get with 2 retries, but status is -1 '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ mockResponse(json_data={'status': -1, 'error': 'task_error'}, status_code=200)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1)
+ assert error == 'Failed to task action, error: task_error'
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_negative_wait_on_completion_error(mock_request, mock_sleep):
+ ''' get with 4 failed retries '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ requests.exceptions.HTTPError('some http exception'),
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1)
+ assert error == 'some http exception'
+
+
+@patch('time.sleep')
+@patch('requests.request')
+def test_negative_wait_on_completion_timeout(mock_request, mock_sleep):
+ ''' successful get with 2 retries, but status is 0 '''
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ OSError('some exception'),
+ requests.exceptions.ConnectionError('some exception'),
+ mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200),
+ mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200),
+ mockResponse(json_data={'status': 0, 'error': 'task_error'}, status_code=200)
+ ]
+ rest_api = create_restapi_object(mock_args())
+ rest_api.module.params['client_id'] = '123'
+ error = rest_api.wait_on_completion('api', 'action', 'task', 2, 1)
+ assert error == 'Taking too long for action to task or not properly setup'
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py
new file mode 100644
index 000000000..33041f64f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module.py
@@ -0,0 +1,578 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" unit tests for module_utils netapp.py
+
+ Provides wrappers for cloudmanager REST APIs
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from logging import error
+__metaclass__ = type
+
+# import copy # for deepcopy
+import json
+import sys
+import pytest
+try:
+ import requests.exceptions
+ HAS_REQUESTS_EXC = True
+except ImportError:
+ HAS_REQUESTS_EXC = False
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import cmp as nm_cmp, NetAppModule
+if (not netapp_utils.HAS_REQUESTS or not HAS_REQUESTS_EXC) and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class mockResponse:
+ def __init__(self, json_data, status_code, headers=None):
+ self.json_data = json_data
+ self.status_code = status_code
+ self.content = json_data
+ self.headers = headers or {}
+
+ def json(self):
+ return self.json_data
+
+
+def create_module(args):
+ argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ set_module_args(args)
+ module = basic.AnsibleModule(argument_spec)
+ return module
+
+
+def create_restapi_object(args):
+ module = create_module(args)
+ return netapp_utils.CloudManagerRestAPI(module)
+
+
+def mock_args(feature_flags=None, client_id=None):
+ args = {
+ 'refresh_token': 'ABCDEFGS'
+ }
+ return args
+
+
+TOKEN_DICT = {
+ 'access_token': 'access_token',
+ 'token_type': 'token_type'
+}
+
+
+def test_cmp():
+ assert nm_cmp(None, 'x') == -1
+ assert nm_cmp('y', 'x') == 1
+ assert nm_cmp('y', 'X') == 1
+ assert nm_cmp(['x', 'y'], ['x', 'X']) == 1
+ assert nm_cmp(['x', 'x'], ['x', 'X']) == 0
+
+
+def test_set_parameters():
+ helper = NetAppModule()
+ helper.set_parameters({'a': None, 'b': 'b'})
+ assert 'a' not in helper.parameters
+ assert 'b' in helper.parameters
+
+
+def test_cd_action():
+ desired = {}
+ helper = NetAppModule()
+ assert helper.get_cd_action(None, desired) == 'create'
+ desired['state'] = 'present'
+ assert helper.get_cd_action(None, desired) == 'create'
+ assert helper.get_cd_action({}, desired) is None
+ desired['state'] = 'absent'
+ assert helper.get_cd_action(None, desired) is None
+ assert helper.get_cd_action({}, desired) == 'delete'
+
+
+def test_compare_and_update_values():
+ current = {'a': 'a', 'b': 'b'}
+ desired = {}
+ desired_key = []
+ helper = NetAppModule()
+ assert helper.compare_and_update_values(current, desired, desired_key) == ({}, False)
+ desired_key = ['a']
+ assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'a'}, False)
+ desired = {'a': 'a'}
+ assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'a'}, False)
+ desired = {'a': 'c'}
+ assert helper.compare_and_update_values(current, desired, desired_key) == ({'a': 'c'}, True)
+
+
+@patch('requests.request')
+def test_get_working_environments_info(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'a': 'b'}, status_code=200),
+ mockResponse(json_data={'c': 'd'}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_working_environments_info(rest_api, '') == ({'a': 'b'}, None)
+ assert helper.get_working_environments_info(rest_api, '') == ({'c': 'd'}, '500')
+
+
+def test_look_up_working_environment_by_name_in_list():
+ we_list = [{'name': 'bob', 'b': 'b'}, {'name': 'chuck', 'c': 'c'}]
+ helper = NetAppModule()
+ assert helper.look_up_working_environment_by_name_in_list(we_list, 'bob') == (we_list[0], None)
+ error = "look_up_working_environment_by_name_in_list: Working environment not found"
+ assert helper.look_up_working_environment_by_name_in_list(we_list, 'alice') == (None, error)
+
+
+@patch('requests.request')
+def test_get_working_environment_details_by_name(mock_request):
+ we_list = [{'name': 'bob', 'b': 'b'}, {'name': 'chuck', 'c': 'c'}]
+ json_data = {'onPremWorkingEnvironments': [],
+ 'gcpVsaWorkingEnvironments': [],
+ 'azureVsaWorkingEnvironments': [],
+ 'vsaWorkingEnvironments': []
+ }
+ json_data_onprem = dict(json_data)
+ json_data_onprem['onPremWorkingEnvironments'] = we_list
+ json_data_gcp = dict(json_data)
+ json_data_gcp['gcpVsaWorkingEnvironments'] = we_list
+ json_data_azure = dict(json_data)
+ json_data_azure['azureVsaWorkingEnvironments'] = we_list
+ json_data_aws = dict(json_data)
+ json_data_aws['vsaWorkingEnvironments'] = we_list
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'a': 'b'}, status_code=500), # exists
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data={'c': 'd'}, status_code=400), # get all
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data=json_data_onprem, status_code=200), # get all
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data=json_data_gcp, status_code=200), # get all
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data=json_data_azure, status_code=200), # get all
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data=json_data_aws, status_code=200), # get all
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists
+ mockResponse(json_data=json_data, status_code=200), # get all
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'name') == (None, '500')
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'name') == (None, '400')
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None)
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None)
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None)
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (we_list[0], None)
+ error = "get_working_environment_details_by_name: Working environment not found"
+ assert helper.get_working_environment_details_by_name(rest_api, '', 'bob') == (None, error)
+
+
+@patch('requests.request')
+def test_get_working_environment_details(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'key': [{'a': 'b'}]}, status_code=200),
+ mockResponse(json_data={'key': [{'c': 'd'}]}, status_code=500)
+ ]
+ helper = NetAppModule()
+ args = dict(mock_args())
+ rest_api = create_restapi_object(args)
+ helper.parameters['working_environment_id'] = 'test_we'
+ assert helper.get_working_environment_details(rest_api, '') == ({'key': [{'a': 'b'}]}, None)
+ error = "Error: get_working_environment_details 500"
+ assert helper.get_working_environment_details(rest_api, '') == (None, error)
+
+
+@patch('requests.request')
+def test_get_working_environment_detail_for_snapmirror(mock_request):
+ json_data = {'onPremWorkingEnvironments': [],
+ 'gcpVsaWorkingEnvironments': [],
+ 'azureVsaWorkingEnvironments': [],
+ 'vsaWorkingEnvironments': []
+ }
+ json_data_source = dict(json_data)
+ json_data_source['onPremWorkingEnvironments'] = [{'name': 'test_we_s'}]
+ json_data_destination = dict(json_data)
+ json_data_destination['onPremWorkingEnvironments'] = [{'name': 'test_we_d'}]
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ # by id, first test
+ mockResponse(json_data={'key': [{'publicId': 'test_we_s'}]}, status_code=200), # env details source
+ mockResponse(json_data={'key': [{'publicId': 'test_we_d'}]}, status_code=200), # env details dest
+ # by id, second test
+ mockResponse(json_data={'key': [{'c': 'd'}]}, status_code=500), # error source
+ # by id, third test
+ mockResponse(json_data={'key': [{'publicId': 'test_we_s'}]}, status_code=200), # env details source
+ mockResponse(json_data={'key': [{'e': 'f'}]}, status_code=500), # error source
+ # by name, first test
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists source
+ mockResponse(json_data=json_data_source, status_code=200), # env details source
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists dest
+ mockResponse(json_data=json_data_destination, status_code=200), # env details dest
+ # by name, second test
+ mockResponse(json_data={'key': {'c': 'd'}}, status_code=500), # error source
+ # by name, third test
+ mockResponse(json_data={'a': 'b'}, status_code=200), # exists source
+ mockResponse(json_data=json_data_source, status_code=200), # env details source
+ mockResponse(json_data={'key': {'e': 'f'}}, status_code=500), # error source
+ ]
+ helper = NetAppModule()
+ args = dict(mock_args())
+ rest_api = create_restapi_object(args)
+ # search by id
+ helper.parameters['source_working_environment_id'] = 'test_we_s'
+ helper.parameters['destination_working_environment_id'] = 'test_we_d'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == ({'publicId': 'test_we_s'}, {'publicId': 'test_we_d'}, None)
+ error = "Error getting WE info: 500: {'key': [{'c': 'd'}]}"
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+ error = "Error getting WE info: 500: {'key': [{'e': 'f'}]}"
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+ # search by name
+ del helper.parameters['source_working_environment_id']
+ del helper.parameters['destination_working_environment_id']
+ helper.parameters['source_working_environment_name'] = 'test_we_s'
+ helper.parameters['destination_working_environment_name'] = 'test_we_d'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == ({'name': 'test_we_s'}, {'name': 'test_we_d'}, None)
+ error = '500'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+ error = '500'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+ # no destination id nor name
+ del helper.parameters['destination_working_environment_name']
+ error = 'Cannot find working environment by destination_working_environment_id or destination_working_environment_name'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+ # no source id nor name
+ del helper.parameters['source_working_environment_name']
+ error = 'Cannot find working environment by source_working_environment_id or source_working_environment_name'
+ assert helper.get_working_environment_detail_for_snapmirror(rest_api, '') == (None, None, error)
+
+
+def test_create_account():
+    # create_account is unconditionally unsupported, so any placeholder (here a plain string) works as rest_api
+    helper = NetAppModule()
+    error = "Error: creating an account is not supported."
+    assert helper.create_account("rest_api") == (None, error)
+
+
+@patch('requests.request')
+def test_get_or_create_account(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
+ mockResponse(json_data=[], status_code=200),
+ mockResponse(json_data={'c': 'd'}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_or_create_account(rest_api) == ('account_id', None)
+ error = 'Error: account cannot be located - check credentials or provide account_id.'
+ assert helper.get_or_create_account(rest_api) == (None, error)
+ error = '500'
+ assert helper.get_or_create_account(rest_api) == (None, error)
+
+
+@patch('requests.request')
+def test_get_account_info(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
+ mockResponse(json_data=[], status_code=200),
+ mockResponse(json_data={'c': 'd'}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_account_info(rest_api, '') == ([{'accountPublicId': 'account_id'}], None)
+ assert helper.get_account_info(rest_api, '') == ([], None)
+ assert helper.get_account_info(rest_api, '') == (None, '500')
+
+
+@patch('requests.request')
+def test_get_account_id(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
+ mockResponse(json_data=[], status_code=200),
+ mockResponse(json_data={'c': 'd'}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_account_id(rest_api) == ('account_id', None)
+ error = 'Error: no account found - check credentials or provide account_id.'
+ assert helper.get_account_id(rest_api) == (None, error)
+ error = '500'
+ assert helper.get_account_id(rest_api) == (None, error)
+
+
+@patch('requests.request')
+def test_get_accounts_info(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200),
+ mockResponse(json_data={'c': 'd'}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_accounts_info(rest_api, '') == ([{'accountPublicId': 'account_id'}], None)
+ error = '500'
+ assert helper.get_accounts_info(rest_api, '') == (None, error)
+
+
+def test_set_api_root_path():
+    # verifies the API root path selection per cloud provider and HA flag
+    helper = NetAppModule()
+    helper.parameters['working_environment_id'] = 'abc'
+    working_environment_details = {'cloudProviderName': 'Amazon', 'isHA': False}
+    helper.set_api_root_path(working_environment_details, helper)  # NOTE(review): helper doubles as the 'module' argument — confirm expected type
+    assert helper.api_root_path == '/occm/api/vsa'
+    working_environment_details = {'cloudProviderName': 'Other', 'isHA': False}
+    helper.set_api_root_path(working_environment_details, helper)
+    assert helper.api_root_path == '/occm/api/other/vsa'
+    working_environment_details = {'cloudProviderName': 'Other', 'isHA': True}
+    helper.set_api_root_path(working_environment_details, helper)
+    assert helper.api_root_path == '/occm/api/other/ha'
+
+
+@patch('requests.request')
+def test_get_occm_agents_by_account(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'a': 'b'}], status_code=200),
+ mockResponse(json_data=[{'c': 'd'}], status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_occm_agents_by_account(rest_api, '') == ([{'a': 'b'}], None)
+ error = '500'
+ assert helper.get_occm_agents_by_account(rest_api, '') == ([{'c': 'd'}], error)
+
+
+@patch('requests.request')
+def test_get_occm_agents_by_name(mock_request):
+ json_data = {'agents':
+ [{'name': '', 'provider': ''},
+ {'name': 'a1', 'provider': 'p1'},
+ {'name': 'a1', 'provider': 'p1'},
+ {'name': 'a1', 'provider': 'p2'},
+ {'name': 'a2', 'provider': 'p1'},
+ ]}
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=json_data, status_code=200),
+ mockResponse(json_data=json_data, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ expected = [agent for agent in json_data['agents'] if agent['name'] == 'a1' and agent['provider'] == 'p1']
+ assert helper.get_occm_agents_by_name(rest_api, 'account', 'a1', 'p1') == (expected, None)
+ error = '500'
+ assert helper.get_occm_agents_by_name(rest_api, 'account', 'a1', 'p1') == (expected, error)
+
+
+@patch('requests.request')
+def test_get_agents_info(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200), # get account_id
+ mockResponse(json_data=[{'a': 'b'}], status_code=200),
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200), # get account_id
+ mockResponse(json_data=[{'c': 'd'}], status_code=500),
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=400), # get account_id
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ assert helper.get_agents_info(rest_api, '') == ([{'a': 'b'}], None)
+ error = '500'
+ assert helper.get_agents_info(rest_api, '') == ([{'c': 'd'}], error)
+ error = '400'
+ assert helper.get_agents_info(rest_api, '') == (None, error)
+
+
+@patch('requests.request')
+def test_get_active_agents_info(mock_request):
+ json_data = {'agents':
+ [{'name': '', 'provider': '', 'agentId': 1, 'status': ''},
+ {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'},
+ {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': ''},
+ {'name': 'a1', 'provider': 'p2', 'agentId': 1, 'status': 'active'},
+ {'name': 'a2', 'provider': 'p1', 'agentId': 1, 'status': 'active'},
+ ]}
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200), # get account_id
+ mockResponse(json_data=json_data, status_code=200),
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=200), # get account_id
+ mockResponse(json_data=json_data, status_code=500),
+ mockResponse(json_data=[{'accountPublicId': 'account_id'}], status_code=400), # get account_id
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ active = [agent for agent in json_data['agents'] if agent['status'] == 'active']
+ expected = [{'name': agent['name'], 'client_id': agent['agentId'], 'provider': agent['provider']} for agent in active]
+ assert helper.get_active_agents_info(rest_api, '') == (expected, None)
+ error = '500'
+ assert helper.get_active_agents_info(rest_api, '') == (expected, error)
+ error = '400'
+ assert helper.get_active_agents_info(rest_api, '') == (None, error)
+
+
+@patch('requests.request')
+def test_get_occm_agent_by_id(mock_request):
+ json_data = {'agent':
+ {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'}
+ }
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=json_data, status_code=200),
+ mockResponse(json_data=json_data, status_code=500),
+ mockResponse(json_data={'a': 'b'}, status_code=500),
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ expected = json_data['agent']
+ assert helper.get_occm_agent_by_id(rest_api, '') == (expected, None)
+ error = '500'
+ assert helper.get_occm_agent_by_id(rest_api, '') == (expected, error)
+ assert helper.get_occm_agent_by_id(rest_api, '') == ({'a': 'b'}, error)
+
+
+@patch('requests.request')
+def test_check_occm_status(mock_request):
+ json_data = {'agent':
+ {'name': 'a1', 'provider': 'p1', 'agentId': 1, 'status': 'active'}
+ }
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data=json_data, status_code=200),
+ mockResponse(json_data=json_data, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ expected = json_data
+ assert helper.check_occm_status(rest_api, '') == (expected, None)
+ error = '500'
+ assert helper.check_occm_status(rest_api, '') == (expected, error)
+
+
+@patch('requests.request')
+def test_register_agent_to_service(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={}, status_code=200),
+ mockResponse(json_data={}, status_code=200),
+ mockResponse(json_data={}, status_code=500)
+ ]
+ helper = NetAppModule()
+ rest_api = create_restapi_object(mock_args())
+ helper.parameters['account_id'] = 'account_id'
+ helper.parameters['company'] = 'company'
+ helper.parameters['region'] = 'region'
+ helper.parameters['subnet_id'] = 'subnet_id'
+ expected = {}
+ assert helper.register_agent_to_service(rest_api, 'provider', 'vpc') == (expected, None)
+ args, kwargs = mock_request.call_args
+ body = kwargs['json']
+ assert 'placement' in body
+ assert 'network' in body['placement']
+ assert body['placement']['network'] == 'vpc'
+ body_other = body
+ assert helper.register_agent_to_service(rest_api, 'AWS', 'vpc') == (expected, None)
+ args, kwargs = mock_request.call_args
+ body = kwargs['json']
+ assert 'placement' in body
+ assert 'network' in body['placement']
+ assert body['placement']['network'] == 'vpc'
+ assert body_other != body
+ body['placement']['provider'] = 'provider'
+ assert body_other == body
+ error = '500'
+ assert helper.register_agent_to_service(rest_api, 'provider', 'vpc') == (expected, error)
+
+
+@patch('requests.request')
+def test_delete_occm(mock_request):
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'result': 'any'}, status_code=200),
+ mockResponse(json_data={'result': 'any'}, status_code=500),
+ ]
+ helper = NetAppModule()
+ helper.parameters['account_id'] = 'account_id'
+ rest_api = create_restapi_object(mock_args())
+ assert helper.delete_occm(rest_api, '') == ({'result': 'any'}, None)
+ error = '500'
+ assert helper.delete_occm(rest_api, '') == ({'result': 'any'}, error)
+
+
+@patch('requests.request')
+def test_delete_occm_agents(mock_request):
+ agents = [{'agentId': 'a1'},
+ {'agentId': 'a2'}]
+ mock_request.side_effect = [
+ mockResponse(json_data=TOKEN_DICT, status_code=200), # OAUTH
+ mockResponse(json_data={'result': 'any'}, status_code=200), # a1
+ mockResponse(json_data={'result': 'any'}, status_code=200), # a2
+ mockResponse(json_data={'result': 'any'}, status_code=500), # a1
+ mockResponse(json_data={'result': 'any'}, status_code=200), # a2
+ mockResponse(json_data={'result': 'any'}, status_code=200), # a1
+ mockResponse(json_data={'result': 'any'}, status_code=200), # a2
+ ]
+ helper = NetAppModule()
+ helper.parameters['account_id'] = 'account_id'
+ rest_api = create_restapi_object(mock_args())
+ assert helper.delete_occm_agents(rest_api, agents) == []
+ error = '500'
+ assert helper.delete_occm_agents(rest_api, agents) == [({'result': 'any'}, error)]
+ agents.append({'a': 'b'})
+ error = "unexpected agent contents: {'a': 'b'}"
+ assert helper.delete_occm_agents(rest_api, agents) == [(None, error)]
+
+
+@patch('requests.request')
+def test_get_tenant(mock_request):
+    tenants = [{'publicId': 'a1'},
+               {'publicId': 'a2'}]
+    mock_request.side_effect = [
+        mockResponse(json_data=TOKEN_DICT, status_code=200),  # OAUTH
+        mockResponse(json_data=tenants, status_code=200),  # get success
+        mockResponse(json_data={'result': 'any'}, status_code=500),  # get error
+    ]
+    helper = NetAppModule()
+    # account_id is deliberately left unset: get_tenant does not require it
+    rest_api = create_restapi_object(mock_args())
+    assert helper.get_tenant(rest_api, '') == ('a1', None)  # first tenant's publicId is returned
+    error = "Error: unexpected response on getting tenant for cvo: 500, {'result': 'any'}"
+    assert helper.get_tenant(rest_api, '') == (None, error)
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py
new file mode 100644
index 000000000..b24778f47
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/module_utils/test_netapp_module_open.py
@@ -0,0 +1,77 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2021, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" unit tests for module_utils netapp_module.py
+
+ Provides utility functions for cloudmanager REST APIs
+"""
+
+from __future__ import (absolute_import, division, print_function)
+# NOTE(review): removed unused 'from logging import error' (flake8 F401); nothing in this module used it
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+if sys.version_info < (3, 5):
+    pytestmark = pytest.mark.skip('skipping as builtins not defined for 2.6 and 2.7')
+
+
+@patch('builtins.open')
+def test_certificates(mock_open):  # renamed from 'open' — the old name shadowed the builtin
+    # happy path: file content is read and base64-encoded
+    mock_open.return_value = OPEN(data=b'1234')
+    helper = NetAppModule()
+    assert helper.encode_certificates('test') == ('MTIzNA==', None)  # base64 of b'1234'
+    # empty file is rejected with an explicit error
+    mock_open.return_value = OPEN(data=b'')
+    helper = NetAppModule()
+    assert helper.encode_certificates('test') == (None, 'Error: file is empty')
+    # OSError raised by OPEN.__enter__ is surfaced as the error string
+    mock_open.return_value = OPEN(raise_exception=True)
+    helper = NetAppModule()
+    assert helper.encode_certificates('test') == (None, 'intentional error')
+
+
+class OPEN:
+    '''Minimal stand-in for the file object returned by open() (mock_open availability varies across python versions).'''
+    def __init__(self, data=b'abcd', raise_exception=False):
+        self.data = data                        # bytes returned by read()
+        self.raise_exception = raise_exception  # when True, __enter__ raises OSError
+
+    def read(self):
+        return self.data
+    # __enter__/__exit__ below let instances be used as 'with open(...) as f:' context managers
+
+    def __enter__(self):
+        if self.raise_exception:
+            raise OSError('intentional error')  # simulates an I/O failure when opening the file
+        return self
+
+    def __exit__(self, *args):
+        pass
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py
new file mode 100644
index 000000000..db30ada89
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aggregate.py
@@ -0,0 +1,297 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests ONTAP Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate \
+ import NetAppCloudmanagerAggregate as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestA',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 2,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_create_cloudmanager_aggregate(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'Dummyname',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 2,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_create_cloudmanager_aggregate_by_workingenv_name(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'Dummyname',
+ 'working_environment_name': 'wkone',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 2,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_delete_cloudmanager_aggregate(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'Dummyname',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 2,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_delete_cloudmanager_aggregate_by_workingenv_name(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'Dummyname',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 2,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_update_cloudmanager_aggregate(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestCMAggregate',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'number_of_disks': 3,
+ 'disk_size_size': 100,
+ 'disk_size_unit': 'GB',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+    def test_module_fail_when_required_args_missing(self):
+        ''' required arguments are reported as errors '''
+        with pytest.raises(AnsibleFailJson) as exc:
+            set_module_args({})
+            my_module()  # fail_json is patched (setUp) to raise AnsibleFailJson
+        print('Info: %s' % exc.value.args[0]['msg'])
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    def test_module_fail_when_required_args_present(self, get_token):
+        ''' module instantiation succeeds when all required arguments are present '''
+        with pytest.raises(AnsibleExitJson) as exc:
+            set_module_args(self.set_default_args_pass_check())
+            get_token.return_value = 'test', 'test'
+            my_module()
+            exit_json(changed=True, msg="TestCase Fail when required args are present")
+        assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch(
+ 'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_aggregate_pass(self, get_post_api, get_aggregate_api, get_token):
+ set_module_args(self.set_args_create_cloudmanager_aggregate())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ get_aggregate_api.return_value = None
+ get_post_api.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_aggregate: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch(
+ 'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ def test_delete_cloudmanager_aggregate_pass(self, get_delete_api, get_aggregate_api, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_aggregate())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ my_aggregate = {
+ 'name': 'Dummyname',
+ 'state': 'online',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
+ {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
+ 'homeNode': 'testAWSa-01',
+ }
+ get_aggregate_api.return_value = my_aggregate
+ get_delete_api.return_value = 'Aggregated Deleted', None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_aggregate: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_update_cloudmanager_aggregate_pass(self, get_post_api, get_aggregate_api, get_token):
+ set_module_args(self.set_args_update_cloudmanager_aggregate())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ my_aggregate = {
+ 'name': 'Dummyname',
+ 'state': 'online',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
+ {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
+ 'homeNode': 'testAWSa-01',
+ }
+ get_aggregate_api.return_value = my_aggregate
+ get_post_api.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_update_cloudmanager_aggregate: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_aggregate_by_workingenv_name_pass(self, get_post_api, get_we, get_aggregate_api, get_token):
+ data = self.set_args_create_cloudmanager_aggregate_by_workingenv_name()
+ get_token.return_value = 'test', 'test'
+ my_we = {
+ 'name': 'test',
+ 'publicId': 'test',
+ 'cloudProviderName': 'Amazon'}
+ get_we.return_value = my_we, None
+ data['working_environment_id'] = my_we['publicId']
+ set_module_args(data)
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+ get_aggregate_api.return_value = None
+ get_post_api.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_aggregate: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aggregate.NetAppCloudmanagerAggregate.get_aggregate')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ def test_delete_cloudmanager_aggregate_by_workingenv_name_pass(self, get_delete_api, get_we, get_aggregate_api, get_token):
+ data = self.set_args_delete_cloudmanager_aggregate_by_workingenv_name()
+ my_we = {
+ 'name': 'test',
+ 'publicId': 'test',
+ 'cloudProviderName': 'Amazon'}
+ get_we.return_value = my_we, None
+ data['working_environment_id'] = my_we['publicId']
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ my_aggregate = {
+ 'name': 'Dummyname',
+ 'state': 'online',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'disks': [{'device': 'xvdh vol-313', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12h'},
+ {'device': 'xvdi vol-314', 'position': 'data', 'vmDiskProperties': None,
+ 'ownerNode': 'testAWSa-01', 'name': 'testAWSa-01-i-12i'}],
+ 'homeNode': 'testAWSa-01',
+ }
+ get_aggregate_api.return_value = my_aggregate
+ get_delete_api.return_value = 'Aggregated Deleted', None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_aggregate: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py
new file mode 100644
index 000000000..cee1e439c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_aws_fsx.py
@@ -0,0 +1,165 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx \
+ import NetAppCloudManagerAWSFSX as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestA',
+ 'workspace_id': 'test',
+ 'region': 'us-west-1',
+ 'tenant_id': 'account-test',
+ 'storage_capacity_size': 1024,
+ 'throughput_capacity': 512,
+ 'storage_capacity_size_unit': 'TiB',
+ 'aws_credentials_name': 'test',
+ 'primary_subnet_id': 'test',
+ 'secondary_subnet_id': 'test',
+ 'fsx_admin_password': 'password',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_create_cloudmanager_aws_fsx(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestA',
+ 'workspace_id': 'test',
+ 'region': 'us-west-1',
+ 'tenant_id': 'account-test',
+ 'storage_capacity_size': 1024,
+ 'storage_capacity_size_unit': 'TiB',
+ 'throughput_capacity': 512,
+ 'aws_credentials_name': 'test',
+ 'primary_subnet_id': 'test',
+ 'secondary_subnet_id': 'test',
+ 'fsx_admin_password': 'password',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_delete_cloudmanager_aws_fsx(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'Dummyname',
+ 'tenant_id': 'account-test',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.get_aws_credentials_id')
+ def test_module_fail_when_required_args_present(self, get_aws_credentials_id, get_token):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ get_aws_credentials_id.return_value = '123', None
+ get_token.return_value = 'test', 'test'
+ my_module()
+ exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_aws_fsx_details')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.wait_on_completion_for_fsx')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.check_task_status_for_fsx')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_aws_fsx.NetAppCloudManagerAWSFSX.get_aws_credentials_id')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_aws_fsx_pass(self, get_post_api, get_aws_credentials_id, check_task_status_for_fsx,
+ wait_on_completion_for_fsx, get_aws_fsx_details, get_token):
+ set_module_args(self.set_args_create_cloudmanager_aws_fsx())
+ get_token.return_value = 'test', 'test'
+ get_aws_credentials_id.return_value = '123', None
+ my_obj = my_module()
+
+ response = {'id': 'abcdefg12345'}
+ get_post_api.return_value = response, None, None
+ check_task_status_for_fsx.return_value = {'providerDetails': {'status': {'status': 'ON', 'lifecycle': 'AVAILABLE'}}}, None
+ wait_on_completion_for_fsx.return_value = None
+ get_aws_fsx_details.return_value = None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_aws_fsx_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_aws_fsx_details')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ def test_delete_cloudmanager_aws_fsx_pass(self, get_delete_api, get_aws_fsx_details, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_aws_fsx())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_fsx = {
+ 'name': 'test',
+ 'id': 'test'}
+ get_aws_fsx_details.return_value = my_fsx, None
+ get_delete_api.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_aws_fsx_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py
new file mode 100644
index 000000000..023f993af
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cifs_server.py
@@ -0,0 +1,252 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server \
+ import NetAppCloudmanagerCifsServer as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'refreshToken',
+ 'domain': 'test.com',
+ 'username': 'admin',
+ 'password': 'abcde',
+ 'dns_domain': 'test.com',
+ 'ip_addresses': '["1.0.0.1"]',
+ 'netbios': 'cvoname',
+ 'organizational_unit': 'CN=Computers',
+ })
+
+ def set_default_args_with_workingenv_name_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'working_environment_name': 'weone',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'refreshToken',
+ 'domain': 'test.com',
+ 'username': 'admin',
+ 'password': 'abcde',
+ 'dns_domain': 'test.com',
+ 'ip_addresses': '["1.0.0.1"]',
+ 'netbios': 'cvoname',
+ 'organizational_unit': 'CN=Computers',
+ })
+
+ def set_using_workgroup_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'refreshToken',
+ 'is_workgroup': True,
+ 'server_name': 'abc',
+ 'workgroup_name': 'wk',
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_cifs_server_successfully(self, send_request, create, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = None
+ create.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_cifs_server_idempotency(self, send_request, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = {
+ 'domain': 'test.com',
+ 'dns_domain': 'test.com',
+ 'ip_addresses': ['1.0.0.1'],
+ 'netbios': 'cvoname',
+ 'organizational_unit': 'CN=Computers',
+ }
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_cifs_server_using_workgroup_successfully(self, send_request, create, get, get_token):
+ set_module_args(self.set_using_workgroup_args_pass_check())
+ get.return_value = None
+ create.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.delete_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_delete_cifs_server_successfully(self, send_request, delete, get, get_token):
+ args = self.set_default_args_pass_check()
+ args['state'] = 'absent'
+ set_module_args(args)
+ get.return_value = {
+ 'domain': 'test.com',
+ 'dns_domain': 'test.com',
+ 'ip_addresses': ['1.0.0.1'],
+ 'netbios': 'cvoname',
+ 'organizational_unit': 'CN=Computers',
+ }
+ delete.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.create_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+    def test_create_cifs_server_with_workingenv_name_successfully(self, send_request, create, get, get_we, get_token):
+ args = self.set_default_args_with_workingenv_name_pass_check()
+ my_we = {
+ 'name': 'test',
+ 'publicId': 'test',
+ 'cloudProviderName': 'Amazon'}
+ get_we.return_value = my_we, None
+ args['working_environment_id'] = my_we['publicId']
+ set_module_args(args)
+ get.return_value = None
+ create.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.get_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cifs_server.NetAppCloudmanagerCifsServer.delete_cifs_server')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_delete_cifs_server_with_workingenv_name_successfully(self, send_request, delete, get, get_we, get_token):
+ args = self.set_default_args_with_workingenv_name_pass_check()
+ args['state'] = 'absent'
+ my_we = {
+ 'name': 'test',
+ 'publicId': 'test',
+ 'cloudProviderName': 'Amazon'}
+ get_we.return_value = my_we, None
+ args['working_environment_id'] = my_we['publicId']
+ set_module_args(args)
+ get.return_value = {
+ 'domain': 'test.com',
+ 'dns_domain': 'test.com',
+ 'ip_addresses': ['1.0.0.1'],
+ 'netbios': 'cvoname',
+ 'organizational_unit': 'CN=Computers',
+ }
+ delete.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, 'dummy')]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py
new file mode 100644
index 000000000..dab9cde66
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_aws.py
@@ -0,0 +1,730 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+from logging import exception
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+HAS_BOTOCORE = True
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ HAS_BOTOCORE = False
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws \
+ import NetAppCloudManagerConnectorAWS as my_module, IMPORT_EXCEPTION, main as my_main
+
+if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION)
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestA',
+ 'region': 'us-west-1',
+ 'key_name': 'dev_automation',
+ 'subnet_id': 'subnet-test',
+ 'ami': 'ami-test',
+ 'security_group_ids': ['sg-test'],
+ 'refresh_token': 'myrefresh_token',
+ 'iam_instance_profile_name': 'OCCM_AUTOMATION',
+ 'account_id': 'account-test',
+ 'company': 'NetApp'
+ })
+
+ def set_args_create_cloudmanager_connector_aws(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'Dummyname',
+ 'region': 'us-west-1',
+ 'key_name': 'dev_automation',
+ 'subnet_id': 'subnet-test',
+ 'ami': 'ami-test',
+ 'security_group_ids': ['sg-test'],
+ 'refresh_token': 'myrefresh_token',
+ 'iam_instance_profile_name': 'OCCM_AUTOMATION',
+ 'account_id': 'account-test',
+ 'company': 'NetApp'
+ })
+
+ def set_args_delete_cloudmanager_connector_aws(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'instance_id': 'test',
+ 'region': 'us-west-1',
+ 'account_id': 'account-test',
+ 'refresh_token': 'myrefresh_token',
+ 'company': 'NetApp'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ def test_module_fail_when_required_args_present(self, get_token):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ get_token.return_value = 'test', 'test'
+ my_module()
+ exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.create_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.register_agent_to_service')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_ami')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_connector_aws_pass(self, get_post_api, get_ami, register_agent_to_service, get_vpc, create_instance, get_instance, get_token):
+ set_module_args(self.set_args_create_cloudmanager_connector_aws())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ get_post_api.return_value = None, None, None
+ get_ami.return_value = 'ami-test'
+ register_agent_to_service.return_value = 'test', 'test'
+ get_vpc.return_value = 'test'
+ create_instance.return_value = 'test', 'test'
+ get_instance.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_connector_aws: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm')
+ def test_delete_cloudmanager_connector_aws_pass(self, delete_occm, get_occm_agent_by_id, delete_api, get_instance, delete_instance, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_connector_aws())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_instance = {
+ 'InstanceId': 'instance_id_1'
+ }
+ get_instance.return_value = my_instance
+ get_occm_agent_by_id.return_value = {'agentId': 'test', 'state': 'active'}, None
+ delete_api.return_value = None, None, None
+ delete_instance.return_value = None
+ delete_occm.return_value = None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm')
+ def test_delete_cloudmanager_connector_aws_pass_no_ids(self, delete_occm, get_occm_agents, delete_api, get_instance, delete_instance, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_connector_aws = {
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'refresh_token': 'myrefresh_token',
+ }
+ my_instance = {
+ 'InstanceId': 'instance_id_1'
+ }
+ # get_connector_aws.return_value = my_connector_aws
+ get_instance.return_value = my_instance
+ delete_api.return_value = None, None, None
+ delete_instance.return_value = None
+ get_occm_agents.return_value = [{'agentId': 'test', 'status': 'active'}], None
+ delete_occm.return_value = None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print()
+ print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.delete_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_aws.NetAppCloudManagerConnectorAWS.get_instance')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm')
+ def test_delete_cloudmanager_connector_aws_negative_no_instance(self, delete_occm, get_occm_agents, delete_api, get_instance, delete_instance, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_connector_aws = {
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'refresh_token': 'myrefresh_token',
+ }
+ my_instance = None
+ # get_connector_aws.return_value = my_connector_aws
+ get_instance.return_value = my_instance
+ delete_api.return_value = None, None, None
+ delete_instance.return_value = None
+ get_occm_agents.return_value = [{'agentId': 'test', 'status': 'active'}], None
+ delete_occm.return_value = None, "some error on delete occm"
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.apply()
+ print()
+ print('Info: test_delete_cloudmanager_connector_aws: %s' % repr(exc.value))
+ msg = "Error: deleting OCCM agent(s): [(None, 'some error on delete occm')]"
+ assert msg in exc.value.args[0]['msg']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('boto3.client')
+ def test_get_instance_empty(self, get_boto3_client, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ get_boto3_client.return_value = EC2()
+ my_obj = my_module()
+ instance = my_obj.get_instance()
+ print('instance', instance)
+ assert instance is None
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('boto3.client')
+ def test_get_instance_one(self, get_boto3_client, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ get_boto3_client.return_value = EC2([{'state': 'active'}])
+ my_obj = my_module()
+ instance = my_obj.get_instance()
+ print('instance', instance)
+ assert instance
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('boto3.client')
+ def test_get_instance_many_terminated(self, get_boto3_client, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ get_boto3_client.return_value = EC2([{'state': 'terminated'},
+ {'state': 'terminated', 'reservation': '2'},
+ {'state': 'terminated', 'name': 'xxxx'}])
+ my_obj = my_module()
+ instance = my_obj.get_instance()
+ print('instance', instance)
+ assert instance is None
+
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_get_instance_many_but_only_one_active(self, get_boto3_client, get_token):
        ''' a single active instance among terminated ones is found and returned.
        NOTE(review): a later method in this class reuses this exact name, so this
        definition is shadowed at class-creation time and never collected by pytest;
        one of the two should be renamed.
        '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        args.pop('client_id')
        args.pop('instance_id')
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        my_obj = my_module()
        instance = my_obj.get_instance()
        print('instance', instance)
        assert instance
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('boto3.client')
+ def test_get_instance_many_but_only_one_active(self, get_boto3_client, get_token):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('client_id')
+ args.pop('instance_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ get_boto3_client.return_value = EC2([{'state': 'active'},
+ {'state': 'terminated', 'reservation': '2'},
+ {'state': 'active', 'name': 'xxxx'}])
+ my_obj = my_module()
+ with pytest.raises(AnsibleFailJson) as exc:
+ my_obj.get_instance()
+ msg = "Error: found multiple instances for name"
+ assert msg in exc.value.args[0]['msg']
+
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_get_instance_exception(self, get_boto3_client, get_token):
        ''' a botocore ClientError raised by describe_instances is reported via fail_json '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        args.pop('client_id')
        args.pop('instance_id')
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2(raise_exc=True)
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.get_instance()
        msg = "An error occurred (test_only) when calling the describe_instances operation: forced error in unit testing"
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep):
        ''' happy path: instance is created, connector registers, agent immediately reports active '''
        args = self.set_args_create_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None
        get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, None
        my_obj = my_module()
        instance = my_obj.create_instance()
        print('instance', instance)
        assert instance
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.encode_certificates')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance_no_ami_with_tags(self, get_boto3_client, register, get_token, get_occm_agent_by_id, get_account, encode_cert, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP, no account id '''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args.pop('account_id')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        args['proxy_certificates'] = ['cert1', 'cert2']
        set_module_args(args)
        get_account.return_value = 'account_id', None
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        encode_cert.return_value = 'base64', None
        register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None
        # agent reports 'pending' on the first two polls, then 'active':
        # exercises the wait-for-active retry loop
        get_occm_agent_by_id.side_effect = [
            ({'agentId': 'test', 'status': 'pending'}, None),
            ({'agentId': 'test', 'status': 'pending'}, None),
            ({'agentId': 'test', 'status': 'active'}, None)]
        my_obj = my_module()
        instance = my_obj.create_instance()
        print('instance', instance)
        assert instance
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance_timeout(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP'''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None
        # agent never leaves 'pending', so the wait loop must give up and fail
        get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'pending'}, None
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_instance()
        msg = "Error: taking too long for OCCM agent to be active or not properly setup"
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance_error_in_get_agent(self, get_boto3_client, register, get_token, get_occm_agent_by_id, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP'''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None
        # (response, error) tuple with a non-None error makes the status poll fail
        get_occm_agent_by_id.return_value = 'forcing an error', 'intentional error'
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_instance()
        msg = "Error: not able to get occm status: intentional error, forcing an error"
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_create_instance_error_in_get_account(self, get_boto3_client, get_token, get_account, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP, no account id '''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args.pop('account_id')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        set_module_args(args)
        # account lookup fails before any instance is created
        get_account.return_value = 'forcing an error', 'intentional error'
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_instance()
        msg = "Error: failed to get account: intentional error."
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance_error_in_register(self, get_boto3_client, register, get_token, get_account, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP, no account id '''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args.pop('account_id')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        set_module_args(args)
        get_account.return_value = 'account_id', None
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        # the REST post used for connector registration returns an error tuple
        register.return_value = 'forcing an error', 'intentional error', 'dummy'
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_instance()
        msg = "Error: unexpected response on connector setup: intentional error, forcing an error"
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.encode_certificates')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_or_create_account')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
    @patch('boto3.client')
    def test_create_instance_error_in_open(self, get_boto3_client, register, get_token, get_account, encode_cert, dont_sleep):
        ''' additional paths: get_ami, add tags, no public IP, no account id '''
        args = self.set_args_create_cloudmanager_connector_aws()
        args.pop('ami')
        args.pop('account_id')
        args['aws_tag'] = [{'tag_key': 'tkey', 'tag_value': 'tvalue'}]
        args['associate_public_ip_address'] = False
        args['proxy_certificates'] = ['cert1', 'cert2']
        set_module_args(args)
        get_account.return_value = 'account_id', None
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'terminated'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        register.return_value = {'clientId': 'xxx', 'clientSecret': 'yyy'}, None, None
        # reading the first proxy certificate file fails
        encode_cert.return_value = None, 'intentional error'
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_instance()
        msg = "Error: could not open/read file 'cert1' of proxy_certificates: intentional error"
        assert msg in exc.value.args[0]['msg']
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_instance(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep):
        ''' happy path: agent stays active for two polls, then reports terminated '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.side_effect = [
            ({'agentId': 'test', 'status': 'active'}, None),
            ({'agentId': 'test', 'status': 'active'}, None),
            ({'agentId': 'test', 'status': 'terminated'}, None)]
        my_obj = my_module()
        error = my_obj.delete_instance()
        assert not error
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.delete_occm_agents')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_no_client(self, get_boto3_client, get_token, get_occm_agent_by_id, get_occm_agents_by_name, delete_occm_agents, dont_sleep):
        ''' delete without client_id: agents are looked up by name, never by id '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        args.pop('client_id')
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.side_effect = [
            ({'agentId': 'test', 'status': 'active'}, None),
            ({'agentId': 'test', 'status': 'active'}, None),
            ({'agentId': 'test', 'status': 'terminated'}, None)]
        get_occm_agents_by_name.return_value = [], None
        delete_occm_agents.return_value = None
        # exc payload is not inspected; the point is that my_main exits cleanly
        with pytest.raises(AnsibleExitJson) as exc:
            my_main()
        # without client_id, the by-id agent status poll must be skipped
        assert not get_occm_agent_by_id.called
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_instance_timeout(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep):
        ''' agent never reports terminated, so delete_instance returns a timeout error string '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, None
        my_obj = my_module()
        error = my_obj.delete_instance()
        assert 'Error: taking too long for instance to finish terminating.' == error
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_instance_error_on_agent(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep):
        ''' an error polling the agent status after deletion is returned as an error string '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.return_value = {'agentId': 'test', 'status': 'active'}, 'intentional error'
        my_obj = my_module()
        error = my_obj.delete_instance()
        assert 'Error: not able to get occm agent status after deleting instance: intentional error,' in error
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_instance_client_id_not_found_403(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep):
        ''' a 403 on the agent lookup still fails the post-delete status poll '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.return_value = 'Action not allowed for user', '403'
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.apply()
        msg = "Error: not able to get occm agent status after deleting instance: 403,"
        assert msg in exc.value.args[0]['msg']
        # NOTE(review): this print only executes when the assert passes; move it
        # above the assert if it is meant as failure diagnostics
        print(exc.value.args[0])
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_delete_instance_client_id_not_found_other(self, get_boto3_client, get_token, get_occm_agent_by_id, dont_sleep):
        ''' a non-403 error code on the agent lookup is reported as a getting-agents error '''
        args = self.set_args_delete_cloudmanager_connector_aws()
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agent_by_id.return_value = 'Other error', '404'
        my_obj = my_module()
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.apply()
        msg = "Error: getting OCCM agents: 404,"
        assert msg in exc.value.args[0]['msg']
+
+ @patch('time.sleep')
+ # @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agent_by_id')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('boto3.client')
+ def test_delete_instance_account_id_not_found(self, get_boto3_client, get_token, dont_sleep):
+ args = self.set_args_delete_cloudmanager_connector_aws()
+ args.pop('account_id')
+ args.pop('client_id')
+ set_module_args(args)
+ get_token.return_value = 'test', 'test'
+ get_boto3_client.return_value = EC2([{'state': 'active'},
+ {'state': 'terminated', 'reservation': '2'},
+ {'state': 'terminated', 'name': 'xxxx'}])
+ # get_occm_agent_by_id.return_value = 'Other error', '404'
+ my_obj = my_module()
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ # msg = "Error: getting OCCM agents: 404,"
+ assert exc.value.args[0]['account_id'] is None
+ assert exc.value.args[0]['client_id'] is None
+
    @patch('time.sleep')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_occm_agents_by_name')
    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
    @patch('boto3.client')
    def test_modify_instance(self, get_boto3_client, get_token, get_occm_agents_by_name, dont_sleep):
        ''' changing instance_type on an existing connector only yields a not-supported note '''
        args = self.set_args_create_cloudmanager_connector_aws()
        args['instance_type'] = 't3.large'
        set_module_args(args)
        get_token.return_value = 'test', 'test'
        get_boto3_client.return_value = EC2([{'state': 'active'},
                                             {'state': 'terminated', 'reservation': '2'},
                                             {'state': 'terminated', 'name': 'xxxx'}])
        get_occm_agents_by_name.return_value = [{'agentId': 'test', 'status': 'active'}], None
        my_obj = my_module()
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        msg = "Note: modifying an existing connector is not supported at this time."
        assert msg == exc.value.args[0]['modify']
+
+
class EC2:
    ''' Minimal stand-in for the boto3 EC2 client used by the connector unit tests. '''

    def __init__(self, get_instances=None, create_instance=True, raise_exc=False):
        ''' list of instances as dictionaries:
            name, state are optional, and used to build an instance
            reservation is optional and defaults to 'default'
            create_instance: truthiness controls whether run_instances reports a new instance
            raise_exc: when True, describe_instances raises a botocore ClientError
        '''
        self.get_instances = get_instances if get_instances is not None else []
        # fixed: the original fell back to a list ([]) when None was passed for a
        # boolean flag (copy-paste from the line above); run_instances only tests
        # truthiness, so store the value as given (None is falsy either way)
        self.create_instance = create_instance
        self.raise_exc = raise_exc

    def describe_instances(self, Filters=None, InstanceIds=None):
        ''' return a list of reservations, each reservation is a list of instances
        '''
        if self.raise_exc and HAS_BOTOCORE:
            raise ClientError({'Error': {'Message': 'forced error in unit testing', 'Code': 'test_only'}}, 'describe_instances')
        print('ec2', Filters)
        print('ec2', InstanceIds)
        return self._build_reservations()

    def describe_images(self, Filters=None, Owners=None):
        ''' AMI: three images sharing one id, with distinct creation dates '''
        return {'Images': [{'CreationDate': 'yyyyy', 'ImageId': 'image_id'},
                           {'CreationDate': 'xxxxx', 'ImageId': 'image_id'},
                           {'CreationDate': 'zzzzz', 'ImageId': 'image_id'}]}

    def describe_subnets(self, SubnetIds=None):
        ''' subnets '''
        return {'Subnets': [{'VpcId': 'vpc_id'}]}

    def run_instances(self, **kwargs):
        ''' create and start an instance'''
        if self.create_instance:
            return {'Instances': [{'InstanceId': 'instance_id'}]}
        return {'Instances': []}

    def terminate_instances(self, **kwargs):
        ''' terminate an instance'''
        return

    def _build_reservations(self):
        ''' return a list of reservations, each reservation is a list of instances
        '''
        reservations = {}
        for instance in self.get_instances:
            reservation = instance.get('reservation', 'default')
            if reservation not in reservations:
                reservations[reservation] = []
            # provide default values for name or state if one is present
            name, state = None, None
            if 'name' in instance:
                name = instance['name']
                state = instance.get('state', 'active')
            elif 'state' in instance:
                name = instance.get('name', 'd_name')
                state = instance['state']
            instance_id = instance.get('instance_id', '12345')
            instance_type = instance.get('instance_type', 't3.xlarge')
            if name:
                reservations[reservation].append({'Name': name, 'State': {'Name': state}, 'InstanceId': instance_id, 'InstanceType': instance_type})
        return {
            'Reservations': [
                {'Instances': instances} for instances in reservations.values()
            ]
        }
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py
new file mode 100644
index 000000000..37a93a291
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_azure.py
@@ -0,0 +1,178 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure \
+ import NetAppCloudManagerConnectorAzure as my_module, IMPORT_EXCEPTION
+
+if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION)
+
+
def set_module_args(args):
    '''prepare arguments so that they will be picked up during module creation'''
    # serialize under the ANSIBLE_MODULE_ARGS key, mirroring how Ansible passes args
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access
+
+
class AnsibleExitJson(Exception):
    '''Raised in place of module.exit_json so the test can capture its payload'''


class AnsibleFailJson(Exception):
    '''Raised in place of module.fail_json so the test can capture its payload'''


def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    '''Replacement for exit_json: deliver the module result through an exception.'''
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    '''Replacement for fail_json: deliver the failure payload through an exception.'''
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
+
+
class MockCMConnection:
    ''' Mock response of http connections '''

    def __init__(self, kind=None, parm1=None):
        # record the construction parameters for later inspection by tests
        self.type = kind
        self.parm1 = parm1
+
+
# using pytest natively, without unittest.TestCase
@pytest.fixture
def patch_ansible():
    '''Swap AnsibleModule's exit_json/fail_json for exception-raising stubs for the test duration.'''
    with patch.multiple(basic.AnsibleModule,
                        exit_json=exit_json,
                        fail_json=fail_json) as mocks:
        yield mocks
+
+
def set_default_args_pass_check():
    '''Minimal argument set that passes module validation for state=present.'''
    args = {
        'state': 'present',
        'name': 'TestA',
        'location': 'westus',
        'resource_group': 'occm_group_westus',
        'subnet_id': 'Subnet1',
        'vnet_id': 'Vnet1',
        'subscription_id': 'subscriptionId-test',
        'refresh_token': 'myrefresh_token',
        'account_id': 'account-test',
        'company': 'NetApp',
        'admin_username': 'test',
        'admin_password': 'test',
        'network_security_group_name': 'test',
    }
    return args
+
+
def set_args_create_cloudmanager_connector_azure():
    '''Arguments for creating an Azure connector (state=present).

    NOTE: currently identical to set_default_args_pass_check; kept separate so the
    create scenario can diverge without touching the validation-only helper.
    '''
    create_args = {
        'state': 'present',
        'name': 'TestA',
        'location': 'westus',
        'resource_group': 'occm_group_westus',
        'subnet_id': 'Subnet1',
        'vnet_id': 'Vnet1',
        'subscription_id': 'subscriptionId-test',
        'refresh_token': 'myrefresh_token',
        'account_id': 'account-test',
        'company': 'NetApp',
        'admin_username': 'test',
        'admin_password': 'test',
        'network_security_group_name': 'test',
    }
    return create_args
+
+
def set_args_delete_cloudmanager_connector_azure():
    '''Arguments for deleting an existing Azure connector (state=absent).'''
    delete_args = {
        'state': 'absent',
        'name': 'Dummyname',
        'client_id': 'test',
        'location': 'westus',
        'resource_group': 'occm_group_westus',
        'subnet_id': 'Subnet1',
        'vnet_id': 'Vnet1',
        'subscription_id': 'subscriptionId-test',
        'refresh_token': 'myrefresh_token',
        'account_id': 'account-test',
        'company': 'NetApp',
        'admin_username': 'test',
        'admin_password': 'test',
        'network_security_group_name': 'test',
    }
    return delete_args
+
+
def test_module_fail_when_required_args_missing(patch_ansible):
    ''' required arguments are reported as errors '''
    with pytest.raises(AnsibleFailJson) as exc:
        set_module_args({})
        # instantiation triggers argument validation, which calls fail_json
        my_module()
    print('Info: %s' % exc.value.args[0]['msg'])
+
+
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
def test_module_fail_when_required_args_present(get_token, patch_ansible):
    ''' required arguments are reported as errors '''
    with pytest.raises(AnsibleExitJson) as exc:
        set_module_args(set_default_args_pass_check())
        get_token.return_value = 'test', 'test'
        my_module()
        # reaching this call proves instantiation succeeded; exit_json raises AnsibleExitJson
        exit_json(changed=True, msg="TestCase Fail when required args are present")
    assert exc.value.args[0]['changed']
+
+
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.deploy_azure')
@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.register_agent_to_service')
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
def test_create_cloudmanager_connector_azure_pass(get_post_api, register_agent_to_service, deploy_azure, get_token, patch_ansible):
    ''' happy path: register + deploy succeed, apply reports changed '''
    set_module_args(set_args_create_cloudmanager_connector_azure())
    get_token.return_value = 'test', 'test'
    my_obj = my_module()

    get_post_api.return_value = None, None, None
    register_agent_to_service.return_value = 'test', 'test'
    deploy_azure.return_value = None, None

    with pytest.raises(AnsibleExitJson) as exc:
        my_obj.apply()
    print('Info: test_create_cloudmanager_connector_azure: %s' % repr(exc.value))
    assert exc.value.args[0]['changed']
+
+
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.get_deploy_azure_vm')
@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_azure.NetAppCloudManagerConnectorAzure.delete_azure_occm')
@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
def test_delete_cloudmanager_connector_azure_pass(get_delete_api, delete_azure_occm, get_deploy_azure_vm, get_token, patch_ansible):
    ''' happy path: existing VM is found and deleted, apply reports changed '''
    set_module_args(set_args_delete_cloudmanager_connector_azure())
    get_token.return_value = 'test', 'test'
    my_obj = my_module()

    # VM exists, so state=absent triggers the delete path
    get_deploy_azure_vm.return_value = True
    delete_azure_occm.return_value = None
    get_delete_api.return_value = None, None, None

    with pytest.raises(AnsibleExitJson) as exc:
        my_obj.apply()
    print('Info: test_delete_cloudmanager_connector_azure: %s' % repr(exc.value))
    assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py
new file mode 100644
index 000000000..9d74af2d7
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_connector_gcp.py
@@ -0,0 +1,407 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp \
+ import NetAppCloudManagerConnectorGCP as my_module
+
+IMPORT_ERRORS = []
+HAS_GCP_COLLECTION = False
+
+try:
+ from google import auth
+ from google.auth.transport import requests
+ from google.oauth2 import service_account
+ import yaml
+ HAS_GCP_COLLECTION = True
+except ImportError as exc:
+ IMPORT_ERRORS.append(str(exc))
+
+if not HAS_GCP_COLLECTION and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required google packages on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+# using pytest natively, without unittest.TestCase
+@pytest.fixture(name='patch_ansible')
+def fixture_patch_ansible():
+ with patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json) as mocks:
+ yield mocks
+
+
+def set_default_args_pass_check():
+ return dict({
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'my_refresh_token',
+ 'state': 'present',
+ 'name': 'CxName',
+ 'project_id': 'tlv-support',
+ 'zone': 'us-west-1',
+ 'account_id': 'account-test',
+ 'company': 'NetApp',
+ 'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com',
+ })
+
+
+def set_args_create_cloudmanager_connector_gcp():
+ return dict({
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'my_refresh_token',
+ 'state': 'present',
+ 'name': 'CxName',
+ 'project_id': 'tlv-support',
+ 'zone': 'us-west-1',
+ 'account_id': 'account-test',
+ 'company': 'NetApp',
+ 'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com',
+ 'service_account_path': 'test.json',
+ })
+
+
+def set_args_delete_cloudmanager_connector_gcp():
+ return dict({
+ 'client_id': 'test',
+ 'refresh_token': 'my_refresh_token',
+ 'state': 'absent',
+ 'name': 'CxName',
+ 'project_id': 'tlv-support',
+ 'zone': 'us-west-1',
+ 'account_id': 'account-test',
+ 'company': 'NetApp',
+ 'service_account_email': 'terraform-user@tlv-support.iam.gserviceaccount.com',
+ 'service_account_path': 'test.json',
+ })
+
+
+def test_module_fail_when_required_args_missing(patch_ansible):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+def test_module_fail_when_required_args_present(get_token, get_gcp_token, patch_ansible):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(set_default_args_pass_check())
+ get_token.return_value = 'bearer', 'test'
+ get_gcp_token.return_value = 'token', None
+ my_module()
+ exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.deploy_gcp_vm')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_custom_data_for_gcp')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.create_occm_gcp')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_deploy_vm')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+def test_create_cloudmanager_connector_gcp_pass(get_post_api, get_vm, create_occm_gcp, get_custom_data_for_gcp,
+ deploy_gcp_vm, get_gcp_token, get_token, patch_ansible):
+ set_module_args(set_args_create_cloudmanager_connector_gcp())
+ get_token.return_value = 'bearer', 'test'
+ get_gcp_token.return_value = 'test', None
+ my_obj = my_module()
+
+ get_vm.return_value = None
+ deploy_gcp_vm.return_value = None, 'test', None
+ get_custom_data_for_gcp.return_value = 'test', 'test', None
+ create_occm_gcp.return_value = 'test'
+ get_post_api.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_connector_gcp: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed'], create_occm_gcp.return_value[1]
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.delete_occm_gcp')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_deploy_vm')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_occm_agents')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+def test_delete_cloudmanager_connector_gcp_pass(get_delete_api, get_agents, get_deploy_vm, delete_occm_gcp, get_gcp_token, get_token, patch_ansible):
+ set_module_args(set_args_delete_cloudmanager_connector_gcp())
+ get_token.return_value = 'bearer', 'test'
+ get_gcp_token.return_value = 'test', None
+ my_obj = my_module()
+
+ my_connector_gcp = {
+ 'name': 'Dummyname-vm-boot-deployment',
+ 'client_id': 'test',
+ 'refresh_token': 'my_refresh_token',
+ 'operation': {'status': 'active'}
+ }
+ get_deploy_vm.return_value = my_connector_gcp
+ get_agents.return_value = []
+ get_delete_api.return_value = None, None, None
+ delete_occm_gcp.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_connector_gcp: %s' % repr(exc.value))
+
+ assert exc.value.args[0]['changed']
+
+
+TOKEN_DICT = {
+ 'access_token': 'access_token',
+ 'token_type': 'token_type'
+}
+
+
+AGENT_DICTS = {
+ 'active': {
+ 'agent': {'status': 'active'},
+ },
+ 'pending': {
+ 'agent': {'status': 'pending'},
+ },
+ 'other': {
+ 'agent': {'status': 'pending', 'agentId': 'agent11', 'name': 'CxName', 'provider': 'GCP'},
+ }
+}
+
+
+CLIENT_DICT = {
+ 'clientId': '12345',
+ 'clientSecret': 'a1b2c3'
+}
+
+SRR = {
+ # common responses (json_dict, error, ocr_id)
+ 'empty_good': ({}, None, None),
+ 'zero_record': ({'records': []}, None, None),
+ 'get_token': (TOKEN_DICT, None, None),
+ 'get_gcp_token': (TOKEN_DICT, None, None),
+ 'get_agent_status_active': (AGENT_DICTS['active'], None, None),
+ 'get_agent_status_pending': (AGENT_DICTS['pending'], None, None),
+ 'get_agent_status_other': (AGENT_DICTS['other'], None, None),
+ 'get_agents': ({'agents': [AGENT_DICTS['other']['agent']]}, None, None),
+ 'get_agents_empty': ({'agents': []}, None, None),
+ 'get_agent_not_found': (b"{'message': 'Action not allowed for user'}", '403', None),
+ 'get_vm': ({'operation': {'status': 'active'}}, None, None),
+ 'get_vm_not_found': (b"{'message': 'is not found'}", '404', None),
+ 'register_agent': (CLIENT_DICT, None, None),
+ 'end_of_sequence': (None, "Unexpected call to send_request", None),
+ 'generic_error': (None, "Expected error", None),
+}
+
+
+@patch('time.sleep')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_delete_occm_gcp_pass(mock_request, get_gcp_token, ignore_sleep, patch_ansible):
+ set_module_args(set_args_delete_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['empty_good'], # delete
+ SRR['get_agent_status_active'], # status
+ SRR['get_agent_status_pending'], # status
+ SRR['get_agent_status_other'], # status
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ error = my_obj.delete_occm_gcp()
+ print(error)
+ print(mock_request.mock_calls)
+ assert error is None
+
+
+@patch('time.sleep')
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_create_occm_gcp_pass(mock_request, get_gcp_token, ignore_sleep, patch_ansible):
+ set_module_args(set_args_create_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['register_agent'], # register
+ SRR['empty_good'], # deploy
+ SRR['get_agent_status_pending'], # status
+ SRR['get_agent_status_active'], # status
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ client_id = my_obj.create_occm_gcp()
+ print(client_id)
+ print(mock_request.mock_calls)
+ assert client_id == '12345'
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_get_deploy_vm_pass(mock_request, get_gcp_token, patch_ansible):
+ set_module_args(set_args_delete_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['get_vm'], # get
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ vm = my_obj.get_deploy_vm()
+ print(vm)
+ print(mock_request.mock_calls)
+ assert vm == SRR['get_vm'][0]
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_get_occm_agents_absent_pass(mock_request, get_gcp_token, patch_ansible):
+ set_module_args(set_args_delete_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['get_agent_status_active'], # get
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ agents = my_obj.get_occm_agents()
+ print(agents)
+ print(mock_request.mock_calls)
+ assert agents == [SRR['get_agent_status_active'][0]['agent']]
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_get_occm_agents_present_pass(mock_request, get_gcp_token, patch_ansible):
+ set_module_args(set_args_create_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['get_agents'], # get
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ agents = my_obj.get_occm_agents()
+ print(agents)
+ print(mock_request.mock_calls)
+ assert agents == SRR['get_agents'][0]['agents']
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_create_idempotent(mock_request, get_gcp_token, patch_ansible):
+ set_module_args(set_args_create_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+ SRR['get_vm'], # get
+ SRR['get_agents'], # get
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(mock_request.mock_calls)
+ print(exc)
+ assert not exc.value.args[0]['changed']
+ assert exc.value.args[0]['client_id'] == SRR['get_agents'][0]['agents'][0]['agentId']
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+def test_delete_idempotent(mock_request, get_gcp_token, patch_ansible):
+ set_module_args(set_args_delete_cloudmanager_connector_gcp())
+ get_gcp_token.return_value = 'test', None
+ mock_request.side_effect = [
+ SRR['get_token'], # OAUTH
+        SRR['get_vm_not_found'],  # get vm
+ SRR['get_agent_not_found'], # get agents
+ SRR['end_of_sequence'],
+ ]
+ my_obj = my_module()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print(mock_request.mock_calls)
+ print(exc)
+ assert not exc.value.args[0]['changed']
+ assert exc.value.args[0]['client_id'] == ""
+
+
+# @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_connector_gcp.NetAppCloudManagerConnectorGCP.get_gcp_token')
+# @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+# def test_delete_idempotent(mock_request, get_gcp_token, patch_ansible):
+# set_module_args(set_args_delete_cloudmanager_connector_gcp())
+# get_gcp_token.return_value = 'test', None
+# mock_request.side_effect = [
+# SRR['get_token'], # OAUTH
#         SRR['get_vm_not_found'],  # get vm
+# SRR['get_agents'], # get
+# SRR['end_of_sequence'],
+# ]
+# my_obj = my_module()
+
+# with pytest.raises(AnsibleExitJson) as exc:
+# my_obj.apply()
+# print(mock_request.mock_calls)
+# print(exc)
+# assert not exc.value.args[0]['changed']
+# assert exc.value.args[0]['client_id'] == SRR['get_agents'][0][0]['agentId']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py
new file mode 100644
index 000000000..e3dc685d4
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_aws.py
@@ -0,0 +1,426 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws \
+ import NetAppCloudManagerCVOAWS as my_module, IMPORT_EXCEPTION
+
+if IMPORT_EXCEPTION is not None and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7: %s' % IMPORT_EXCEPTION)
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'TestA',
+ 'client_id': 'test',
+ 'region': 'us-west-1',
+ 'use_latest_version': False,
+ 'ontap_version': 'ONTAP-9.10.0.T1',
+ 'vpc_id': 'vpc-test',
+ 'subnet_id': 'subnet-test',
+ 'svm_password': 'password',
+ 'instance_type': 'm5.xlarge',
+ 'refresh_token': 'myrefresh_token',
+ 'is_ha': False
+ })
+
+ def set_args_create_cloudmanager_cvo_aws(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'region': 'us-west-1',
+ 'vpc_id': 'vpc-test',
+ 'subnet_id': 'subnet-test',
+ 'svm_password': 'password',
+ 'refresh_token': 'myrefresh_token',
+ 'is_ha': False
+ })
+
+ def set_args_delete_cloudmanager_cvo_aws(self):
+ return dict({
+ 'state': 'absent',
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'region': 'us-west-1',
+ 'vpc_id': 'vpc-test',
+ 'subnet_id': 'subnet-test',
+ 'svm_password': 'password',
+ 'refresh_token': 'myrefresh_token',
+ 'is_ha': False
+ })
+
+ def set_args_create_bynode_cloudmanager_cvo_aws(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'Dummyname',
+ 'client_id': 'test',
+ 'region': 'us-west-1',
+ 'vpc_id': 'vpc-test',
+ 'subnet_id': 'subnet-test',
+ 'svm_password': 'password',
+ 'refresh_token': 'myrefresh_token',
+ 'license_type': 'cot-premium-byol',
+ 'platform_serial_number': '12345678',
+ 'is_ha': False
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ def test_module_fail_when_required_args_present(self, get_token):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ get_token.return_value = 'test', 'test'
+ my_module()
+ exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ set_module_args(self.set_args_create_cloudmanager_cvo_aws())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_aws()
+ data['is_ha'] = True
+ data['license_type'] = 'ha-capacity-paygo'
+ data['capacity_package_name'] = 'Essential'
+ data.pop('subnet_id')
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_ha_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_capacity_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_aws()
+ data['license_type'] = 'capacity-paygo'
+ data['capacity_package_name'] = 'Essential'
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_ha_capacity_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_aws()
+ data['is_ha'] = True
+ data['license_type'] = 'ha-capacity-paygo'
+ data['capacity_package_name'] = 'Essential'
+ data.pop('subnet_id')
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_ha_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_nodebase_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ data = self.set_args_create_bynode_cloudmanager_cvo_aws()
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_aws.NetAppCloudManagerCVOAWS.get_vpc')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_aws_ha_nodebase_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, get_vpc, wait_on_completion, get_token):
+ data = self.set_args_create_bynode_cloudmanager_cvo_aws()
+ data['license_type'] = 'ha-cot-premium-byol'
+ data['platform_serial_number_node1'] = '12345678'
+ data['platform_serial_number_node2'] = '23456789'
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_working_environment_details_by_name.return_value = None, None
+ get_post_api.return_value = response, None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ get_vpc.return_value = 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_cvo_aws_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ def test_delete_cloudmanager_cvo_aws_pass(self, get_delete_api, get_working_environment_details_by_name,
+ wait_on_completion, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_cvo_aws())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_cvo = {
+ 'name': 'test',
+ 'publicId': 'test'}
+ get_working_environment_details_by_name.return_value = my_cvo, None
+ get_delete_api.return_value = None, None, None
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_cvo_aws_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    def test_change_cloudmanager_cvo_aws(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password, update_cvo_tags,
+                                         update_tier_level, update_instance_license_type, update_writing_speed_state, get_token):
+        ''' modify an existing AWS CVO (svm password, tags, tier level, ONTAP
+            version, instance/license type, writing speed) and verify that
+            apply() exits with changed=True '''
+        data = self.set_default_args_pass_check()
+        data['svm_password'] = 'newpassword'
+        data['update_svm_password'] = True
+        data['ontap_version'] = 'ONTAP-9.10.1P3.T1'
+        data['upgrade_ontap_version'] = True
+        set_module_args(data)
+
+        # Properties the module is expected to detect as modified.
+        modify = ['svm_password', 'aws_tag', 'tier_level', 'ontap_version', 'instance_type', 'license_type', 'writing_speed_state']
+
+        my_cvo = {
+            'name': 'TestA',
+            'publicId': 'test',
+            'cloudProviderName': 'Amazon',
+            'svm_password': 'password',
+            'isHa': False,
+            'svmName': 'svm_TestA',
+            'tenantId': 'Tenant-test',
+            'workingEnvironmentType': 'VSA',
+        }
+        get_cvo.return_value = my_cvo, None
+        # Current (pre-change) properties of the working environment.
+        cvo_property = {'name': 'TestA',
+                        'publicId': 'test',
+                        'status': {'status': 'ON'},
+                        'ontapClusterProperties': {
+                            'capacityTierInfo': {'tierLevel': 'normal'},
+                            'licenseType': {'capacityLimit': {'size': 2.0, 'unit': 'TB'},
+                                            'name': 'Cloud Volumes ONTAP Capacity Based Charging'},
+                            'ontapVersion': '9.10.0',
+                            'upgradeVersions': [{'autoUpdateAllowed': False,
+                                                 'imageVersion': 'ONTAP-9.10.1P3',
+                                                 'lastModified': 1634467078000}],
+                            'writingSpeedState': 'NORMAL'},
+                        'awsProperties': {'accountId': u'123456789011',
+                                          'availabilityZones': [u'us-east-1b'],
+                                          'bootDiskSize': None,
+                                          'cloudProviderAccountId': None,
+                                          'coreDiskExists': True,
+                                          'instances': [{'availabilityZone': 'us-east-1b',
+                                                         'id': 'i-31',
+                                                         'imageId': 'ami-01a6f1234cb1ec375',
+                                                         'instanceProfileId': 'SimFabricPoolInstanceProfileId',
+                                                         'instanceType': 'm5.2xlarge',
+                                                         'isOCCMInstance': False,
+                                                         'isVsaInstance': True,
+                                                         }],
+                                          'regionName': 'us-west-1',
+                                          }
+                        }
+        get_property.return_value = cvo_property, None
+        cvo_details = {'cloudProviderName': 'Amazon',
+                       'isHA': False,
+                       'name': 'TestA',
+                       'ontapClusterProperties': None,
+                       'publicId': 'test',
+                       'status': {'status': 'ON'},
+                       'userTags': {'key1': 'value1'},
+                       'workingEnvironmentType': 'VSA'}
+        get_details.return_value = cvo_details, None
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        # Every update helper reports success (True, no error).
+        for item in modify:
+            if item == 'svm_password':
+                update_svm_password.return_value = True, None
+            elif item == 'aws_tag':
+                update_cvo_tags.return_value = True, None
+            elif item == 'tier_level':
+                update_tier_level.return_value = True, None
+            elif item == 'ontap_version':
+                upgrade_ontap_image.return_value = True, None
+            elif item == 'writing_speed_state':
+                update_writing_speed_state.return_value = True, None
+            elif item == 'instance_type' or item == 'license_type':
+                update_instance_license_type.return_value = True, None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_change_cloudmanager_cvo_aws: %s' % repr(exc.value))
+        # Fix: assert was missing, making the test vacuous; every sibling test
+        # in this patch verifies the module reported a change.
+        assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py
new file mode 100644
index 000000000..f3e072bdb
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_azure.py
@@ -0,0 +1,439 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_azure \
+ import NetAppCloudManagerCVOAZURE as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+    '''prepare arguments so that they will be picked up during module creation'''
+    # Serialize args the same way Ansible feeds them to AnsibleModule.
+    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+    basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+# Raised in place of sys.exit() so tests can inspect the module result.
+class AnsibleExitJson(Exception):
+    '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+# Raised in place of sys.exit() so tests can inspect the failure payload.
+class AnsibleFailJson(Exception):
+    '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
+    '''function to patch over exit_json; package return data into an exception'''
+    # Mirror AnsibleModule.exit_json's default of changed=False.
+    if 'changed' not in kwargs:
+        kwargs['changed'] = False
+    raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
+    '''function to patch over fail_json; package return data into an exception'''
+    kwargs['failed'] = True
+    raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+    ''' Mock response of http connections '''
+
+    def __init__(self, kind=None, parm1=None):
+        # kind/parm1 let a test select canned behavior; unused by most tests.
+        self.type = kind
+        self.parm1 = parm1
+        # self.token_type, self.token = self.get_token()
+
+
+class TestMyModule(unittest.TestCase):
+    ''' a group of related Unit Tests '''
+
+    def setUp(self):
+        # Patch exit_json/fail_json with raising versions so each test can
+        # capture the module result via pytest.raises.
+        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+                                                 exit_json=exit_json,
+                                                 fail_json=fail_json)
+        self.mock_module_helper.start()
+        self.addCleanup(self.mock_module_helper.stop)
+
+    def set_default_args_pass_check(self):
+        # Minimal valid argument set for a single-node Azure CVO.
+        return dict({
+            'state': 'present',
+            'name': 'TestA',
+            'client_id': 'test',
+            'location': 'westus',
+            'use_latest_version': False,
+            'ontap_version': 'ONTAP-9.10.0.T1.azure',
+            'vnet_id': 'vpc-test',
+            'resource_group': 'test',
+            'subnet_id': 'subnet-test',
+            'subscription_id': 'test',
+            'cidr': '10.0.0.0/24',
+            'svm_password': 'password',
+            'license_type': 'azure-cot-standard-paygo',
+            'instance_type': 'Standard_DS4_v2',
+            'refresh_token': 'myrefresh_token',
+            'is_ha': False
+        })
+
+    def set_args_create_cloudmanager_cvo_azure(self):
+        # Arguments for a plain create (paygo defaults).
+        return dict({
+            'state': 'present',
+            'name': 'Dummyname',
+            'client_id': 'test',
+            'location': 'westus',
+            'vnet_id': 'vpc-test',
+            'resource_group': 'test',
+            'subscription_id': 'test',
+            'cidr': '10.0.0.0/24',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'is_ha': False
+        })
+
+    def set_args_delete_cloudmanager_cvo_azure(self):
+        # Same arguments as create, but state=absent.
+        return dict({
+            'state': 'absent',
+            'name': 'Dummyname',
+            'client_id': 'test',
+            'location': 'westus',
+            'vnet_id': 'vpc-test',
+            'resource_group': 'test',
+            'subscription_id': 'test',
+            'cidr': '10.0.0.0/24',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'is_ha': False
+        })
+
+    def set_args_create_bynode_cloudmanager_cvo_azure(self):
+        # Node-based (BYOL) create arguments: license_type + serial_number.
+        return dict({
+            'state': 'present',
+            'name': 'Dummyname',
+            'client_id': 'test',
+            'location': 'westus',
+            'vnet_id': 'vpc-test',
+            'resource_group': 'test',
+            'subscription_id': 'test',
+            'cidr': '10.0.0.0/24',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'license_type': 'azure-cot-premium-byol',
+            'serial_number': '12345678',
+            'is_ha': False
+        })
+
+    def test_module_fail_when_required_args_missing(self):
+        ''' required arguments are reported as errors '''
+        with pytest.raises(AnsibleFailJson) as exc:
+            set_module_args({})
+            my_module()
+            self.rest_api = MockCMConnection()
+        print('Info: %s' % exc.value.args[0]['msg'])
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    def test_module_fail_when_required_args_present(self, get_token):
+        ''' module instantiates successfully when required arguments are present '''
+        with pytest.raises(AnsibleExitJson) as exc:
+            set_module_args(self.set_default_args_pass_check())
+            get_token.return_value = 'test', 'test'
+            my_module()
+            # Explicit exit: only module creation is under test here.
+            exit_json(changed=True, msg="TestCase Fail when required args are present")
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+                                                get_tenant, wait_on_completion, get_token):
+        ''' create a single-node Azure CVO; apply() exits with changed=True '''
+        set_module_args(self.set_args_create_cloudmanager_cvo_azure())
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        # No environment with this name exists yet; POST succeeds.
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_capacity_license_pass(self, get_post_api,
+                                                                 get_working_environment_details_by_name, get_nss,
+                                                                 get_tenant, wait_on_completion, get_token):
+        ''' create with a capacity-based (capacity-paygo) license '''
+        data = self.set_args_create_cloudmanager_cvo_azure()
+        data['license_type'] = 'capacity-paygo'
+        data['capacity_package_name'] = 'Essential'
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_capacity_license_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_ha_capacity_license_pass(self, get_post_api,
+                                                                    get_working_environment_details_by_name, get_nss,
+                                                                    get_tenant, wait_on_completion, get_token):
+        ''' create an HA pair with a capacity-based (ha-capacity-paygo) license '''
+        data = self.set_args_create_cloudmanager_cvo_azure()
+        data['is_ha'] = True
+        data['license_type'] = 'ha-capacity-paygo'
+        data['capacity_package_name'] = 'Professional'
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_ha_capacity_license_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_nodebase_license_pass(self, get_post_api,
+                                                                 get_working_environment_details_by_name, get_nss,
+                                                                 get_tenant, wait_on_completion, get_token):
+        ''' create a single node with a node-based (BYOL) license '''
+        data = self.set_args_create_bynode_cloudmanager_cvo_azure()
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_nodebase_license_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_ha_nodebase_license_pass(self, get_post_api,
+                                                                    get_working_environment_details_by_name, get_nss,
+                                                                    get_tenant, wait_on_completion, get_token):
+        ''' create an HA pair with a node-based (BYOL) license: needs one
+            platform serial number per node '''
+        data = self.set_args_create_bynode_cloudmanager_cvo_azure()
+        data['is_ha'] = True
+        data['license_type'] = 'azure-ha-cot-premium-byol'
+        data['platform_serial_number_node1'] = '12345678'
+        data['platform_serial_number_node2'] = '23456789'
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_azure_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+                                                   get_tenant, wait_on_completion, get_token):
+        ''' create an HA pair with ha-capacity-paygo licensing '''
+        data = self.set_args_create_cloudmanager_cvo_azure()
+        data['is_ha'] = True
+        data['license_type'] = 'ha-capacity-paygo'
+        data['capacity_package_name'] = 'Essential'
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_azure_ha_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+    def test_delete_cloudmanager_cvo_azure_pass(self, get_delete_api, get_working_environment_details_by_name,
+                                                wait_on_completion, get_token):
+        ''' delete an existing Azure CVO; apply() exits with changed=True '''
+        set_module_args(self.set_args_delete_cloudmanager_cvo_azure())
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        my_cvo = {
+            'name': 'Dummyname',
+            'publicId': 'test'}
+        get_working_environment_details_by_name.return_value = my_cvo, None
+        get_delete_api.return_value = None, None, None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_delete_cloudmanager_cvo_azure_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    def test_change_cloudmanager_cvo_azure(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password, update_cvo_tags,
+                                           update_tier_level, update_instance_license_type, update_writing_speed_state, get_token):
+        ''' modify an existing Azure CVO and verify apply() reports a change '''
+        data = self.set_default_args_pass_check()
+        data['svm_password'] = 'newpassword'
+        data['update_svm_password'] = True
+        data['ontap_version'] = 'ONTAP-9.10.1P3.T1.azure'
+        data['upgrade_ontap_version'] = True
+        data['instance_type'] = 'Standard_DS13_v2'
+        set_module_args(data)
+
+        modify = ['svm_password', 'azure_tag', 'tier_level', 'ontap_version', 'instance_type', 'license_type']
+
+        my_cvo = {
+            'name': 'TestA',
+            'publicId': 'test',
+            'svm_password': 'password',
+            'isHA': False,
+            'azure_tag': [{'tag_key': 'keya', 'tag_value': 'valuea'}, {'tag_key': 'keyb', 'tag_value': 'valueb'}],
+        }
+        get_cvo.return_value = my_cvo, None
+
+        cvo_property = {'name': 'TestA',
+                        'publicId': 'test',
+                        'status': {'status': 'ON'},
+                        'ontapClusterProperties': {
+                            'capacityTierInfo': {'tierLevel': 'normal'},
+                            'licensePackageName': 'Professional',
+                            'licenseType': {'capacityLimit': {'size': 2000.0, 'unit': 'TB'},
+                                            'name': 'Cloud Volumes ONTAP Capacity Based Charging'},
+                            'ontapVersion': '9.10.0.T1.azure',
+                            'upgradeVersions': [{'autoUpdateAllowed': False,
+                                                 'imageVersion': 'ONTAP-9.10.1P3',
+                                                 'lastModified': 1634467078000}],
+                            'writingSpeedState': 'NORMAL'},
+                        'providerProperties': {
+                            'cloudProviderAccountId': 'CloudProviderAccount-abcdwxyz',
+                            'regionName': 'westus',
+                            'instanceType': 'Standard_DS4_v2',
+                            'resourceGroup': {
+                                'name': 'TestA-rg',
+                                'location': 'westus',
+                                'tags': {
+                                    'DeployedByOccm': 'true'
+                                }
+                            },
+                            'vnetCidr': '10.0.0.0/24',
+                            'tags': {
+                                'DeployedByOccm': 'true'
+                            }},
+                        'tenantId': 'Tenant-abCdEfg1',
+                        # NOTE(review): key 'workingEnvironmentTyp' looks like a typo
+                        # for 'workingEnvironmentType' — confirm against module code.
+                        'workingEnvironmentTyp': 'VSA'
+                        }
+        get_property.return_value = cvo_property, None
+        cvo_details = {'cloudProviderName': 'Azure',
+                       'isHA': False,
+                       'name': 'TestA',
+                       'ontapClusterProperties': None,
+                       'publicId': 'test',
+                       'status': {'status': 'ON'},
+                       'userTags': {'DeployedByOccm': 'true', 'key1': 'value1'},
+                       'workingEnvironmentType': 'VSA'}
+        get_details.return_value = cvo_details, None
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        # NOTE(review): 'license_type' matches no branch below (the AWS twin
+        # handles it together with 'instance_type') — confirm this is intended.
+        for item in modify:
+            if item == 'svm_password':
+                update_svm_password.return_value = True, None
+            elif item == 'azure_tag':
+                update_cvo_tags.return_value = True, None
+            elif item == 'tier_level':
+                update_tier_level.return_value = True, None
+            elif item == 'ontap_version':
+                upgrade_ontap_image.return_value = True, None
+            elif item == 'writing_speed_state':
+                update_writing_speed_state.return_value = True, None
+            elif item == 'instance_type':
+                update_instance_license_type.return_value = True, None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_change_cloudmanager_cvo_azure: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py
new file mode 100644
index 000000000..1209d2b9e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_cvo_gcp.py
@@ -0,0 +1,543 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_cvo_gcp \
+ import NetAppCloudManagerCVOGCP as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+    '''prepare arguments so that they will be picked up during module creation'''
+    # Serialize args the same way Ansible feeds them to AnsibleModule.
+    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+    basic._ANSIBLE_ARGS = to_bytes(args)  # pylint: disable=protected-access
+
+
+# Raised in place of sys.exit() so tests can inspect the module result.
+class AnsibleExitJson(Exception):
+    '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+# Raised in place of sys.exit() so tests can inspect the failure payload.
+class AnsibleFailJson(Exception):
+    '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
+    '''function to patch over exit_json; package return data into an exception'''
+    # Mirror AnsibleModule.exit_json's default of changed=False.
+    if 'changed' not in kwargs:
+        kwargs['changed'] = False
+    raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
+    '''function to patch over fail_json; package return data into an exception'''
+    kwargs['failed'] = True
+    raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+    ''' Mock response of http connections '''
+
+    def __init__(self, kind=None, parm1=None):
+        # kind/parm1 let a test select canned behavior; unused by most tests.
+        self.type = kind
+        self.parm1 = parm1
+        # self.token_type, self.token = self.get_token()
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+    def setUp(self):
+        # Patch exit_json/fail_json with raising versions so each test can
+        # capture the module result via pytest.raises.
+        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+                                                 exit_json=exit_json,
+                                                 fail_json=fail_json)
+        self.mock_module_helper.start()
+        self.addCleanup(self.mock_module_helper.stop)
+
+    def set_default_args_pass_check(self):
+        # Minimal valid argument set for a single-node GCP CVO.
+        return dict({
+            'state': 'present',
+            'name': 'TestA',
+            'client_id': 'test',
+            'zone': 'us-west-1b',
+            'vpc_id': 'vpc-test',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'is_ha': False,
+            'gcp_service_account': 'test_account',
+            'data_encryption_type': 'GCP',
+            'gcp_volume_type': 'pd-ssd',
+            'gcp_volume_size': 500,
+            'gcp_volume_size_unit': 'GB',
+            'project_id': 'default-project',
+            'tier_level': 'standard'
+        })
+
+    def set_args_create_cloudmanager_cvo_gcp(self):
+        # Arguments for a plain single-node create with labels and a pinned
+        # ONTAP version (use_latest_version=False).
+        return dict({
+            'state': 'present',
+            'name': 'Dummyname',
+            'client_id': 'test',
+            'zone': 'us-west1-b',
+            'vpc_id': 'vpc-test',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'use_latest_version': False,
+            'capacity_tier': 'cloudStorage',
+            'ontap_version': 'ONTAP-9.10.0.T1.gcp',
+            'is_ha': False,
+            'gcp_service_account': 'test_account',
+            'data_encryption_type': 'GCP',
+            'gcp_volume_type': 'pd-ssd',
+            'gcp_volume_size': 500,
+            'gcp_volume_size_unit': 'GB',
+            'gcp_labels': [{'label_key': 'key1', 'label_value': 'value1'}, {'label_key': 'keya', 'label_value': 'valuea'}],
+            'project_id': 'default-project'
+        })
+
+    def test_module_fail_when_required_args_missing(self):
+        ''' required arguments are reported as errors '''
+        with pytest.raises(AnsibleFailJson) as exc:
+            set_module_args({})
+            my_module()
+            self.rest_api = MockCMConnection()
+        print('Info: %s' % exc.value.args[0]['msg'])
+
+    def set_args_delete_cloudmanager_cvo_gcp(self):
+        # Same shape as the create arguments, but state=absent.
+        return dict({
+            'state': 'absent',
+            'name': 'Dummyname',
+            'client_id': 'test',
+            'zone': 'us-west-1',
+            'vpc_id': 'vpc-test',
+            'subnet_id': 'subnet-test',
+            'svm_password': 'password',
+            'refresh_token': 'myrefresh_token',
+            'is_ha': False,
+            'gcp_service_account': 'test_account',
+            'data_encryption_type': 'GCP',
+            'gcp_volume_type': 'pd-ssd',
+            'gcp_volume_size': 500,
+            'gcp_volume_size_unit': 'GB',
+            'project_id': 'project-test'
+        })
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    def test_module_fail_when_required_args_present(self, get_token):
+        ''' module instantiates successfully when required arguments are present '''
+        with pytest.raises(AnsibleExitJson) as exc:
+            set_module_args(self.set_default_args_pass_check())
+            get_token.return_value = 'test', 'test'
+            my_module()
+            # Explicit exit: only module creation is under test here.
+            exit_json(changed=True, msg="TestCase Fail when required args are present")
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_gcp_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+                                              get_tenant, wait_on_completion, get_token):
+        ''' create a single-node GCP CVO; apply() exits with changed=True '''
+        set_module_args(self.set_args_create_cloudmanager_cvo_gcp())
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        # No environment with this name exists yet; POST succeeds.
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+    @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+    def test_create_cloudmanager_cvo_gcp_ha_pass(self, get_post_api, get_working_environment_details_by_name, get_nss,
+                                                 get_tenant, wait_on_completion, get_token):
+        ''' create an HA GCP CVO: HA needs the four subnet/VPC pairs for
+            node-and-data, cluster, HA, and replication connectivity '''
+        data = self.set_args_create_cloudmanager_cvo_gcp()
+        data['is_ha'] = True
+        data['license_type'] = 'ha-capacity-paygo'
+        data['capacity_package_name'] = 'Essential'
+        data['subnet0_node_and_data_connectivity'] = 'default'
+        data['subnet1_cluster_connectivity'] = 'subnet2'
+        data['subnet2_ha_connectivity'] = 'subnet3'
+        data['subnet3_data_replication'] = 'subnet1'
+        data['vpc0_node_and_data_connectivity'] = 'default'
+        data['vpc1_cluster_connectivity'] = 'vpc2'
+        data['vpc2_ha_connectivity'] = 'vpc3'
+        data['vpc3_data_replication'] = 'vpc1'
+        set_module_args(data)
+        get_token.return_value = 'test', 'test'
+        my_obj = my_module()
+
+        response = {'publicId': 'abcdefg12345'}
+        get_post_api.return_value = response, None, None
+        get_working_environment_details_by_name.return_value = None, None
+        get_nss.return_value = 'nss-test', None
+        get_tenant.return_value = 'test', None
+        wait_on_completion.return_value = None
+
+        with pytest.raises(AnsibleExitJson) as exc:
+            my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_ha_pass: %s' % repr(exc.value))
+        assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_gcp_capacity_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_gcp()
+ data['license_type'] = 'capacity-paygo'
+ data['capacity_package_name'] = 'Essential'
+ set_module_args(data)
+
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_post_api.return_value = response, None, None
+ get_working_environment_details_by_name.return_value = None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_capacity_license_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_gcp_ha_capacity_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_gcp()
+ data['license_type'] = 'ha-capacity-paygo'
+ data['capacity_package_name'] = 'Essential'
+ data['is_ha'] = True
+ data['subnet0_node_and_data_connectivity'] = 'default'
+ data['subnet1_cluster_connectivity'] = 'subnet2'
+ data['subnet2_ha_connectivity'] = 'subnet3'
+ data['subnet3_data_replication'] = 'subnet1'
+ data['vpc0_node_and_data_connectivity'] = 'default'
+ data['vpc1_cluster_connectivity'] = 'vpc2'
+ data['vpc2_ha_connectivity'] = 'vpc3'
+ data['vpc3_data_replication'] = 'vpc1'
+ set_module_args(data)
+
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_post_api.return_value = response, None, None
+ get_working_environment_details_by_name.return_value = None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_ha_capacity_license_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_gcp_nodebase_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_gcp()
+ data['license_type'] = 'gcp-cot-premium-byol'
+ data['platform_serial_number'] = '12345678'
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_post_api.return_value = response, None, None
+ get_working_environment_details_by_name.return_value = None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_nodebase_license_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_tenant')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_nss')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.post')
+ def test_create_cloudmanager_cvo_gcp_ha_nodebase_license_pass(self, get_post_api,
+ get_working_environment_details_by_name, get_nss,
+ get_tenant, wait_on_completion, get_token):
+ data = self.set_args_create_cloudmanager_cvo_gcp()
+ data['is_ha'] = True
+ data['subnet0_node_and_data_connectivity'] = 'default'
+ data['subnet1_cluster_connectivity'] = 'subnet2'
+ data['subnet2_ha_connectivity'] = 'subnet3'
+ data['subnet3_data_replication'] = 'subnet1'
+ data['vpc0_node_and_data_connectivity'] = 'default'
+ data['vpc1_cluster_connectivity'] = 'vpc2'
+ data['vpc2_ha_connectivity'] = 'vpc3'
+ data['vpc3_data_replication'] = 'vpc1'
+ data['platform_serial_number_node1'] = '12345678'
+ data['platform_serial_number_node2'] = '23456789'
+ data['license_type'] = 'gcp-ha-cot-premium-byol'
+ set_module_args(data)
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'publicId': 'abcdefg12345'}
+ get_post_api.return_value = response, None, None
+ get_working_environment_details_by_name.return_value = None, None
+ get_nss.return_value = 'nss-test', None
+ get_tenant.return_value = 'test', None
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+        print('Info: test_create_cloudmanager_cvo_gcp_ha_nodebase_license_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.delete')
+ def test_delete_cloudmanager_cvo_gcp_pass(self, get_delete_api, get_working_environment_details_by_name,
+ wait_on_completion, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_cvo_gcp())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+ my_cvo = {
+ 'name': 'Dummyname',
+ 'publicId': 'test'}
+ get_working_environment_details_by_name.return_value = my_cvo, None
+
+ get_delete_api.return_value = None, None, 'test'
+ wait_on_completion.return_value = None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_cvo_gcp_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ def test_change_cloudmanager_cvo_gcp(self, get_cvo, get_property, get_details, update_svm_password, update_cvo_tags,
+ update_tier_level, update_instance_license_type, get_token):
+ set_module_args(self.set_args_create_cloudmanager_cvo_gcp())
+
+ modify = ['svm_password', 'gcp_labels', 'tier_level', 'instance_type']
+
+ my_cvo = {
+ 'name': 'TestA',
+ 'publicId': 'test',
+ 'cloudProviderName': 'GCP',
+ 'isHA': False,
+ 'svmName': 'svm_TestA',
+ 'svm_password': 'password',
+ 'tenantId': 'Tenant-test',
+ }
+ get_cvo.return_value = my_cvo, None
+ cvo_property = {'name': 'Dummyname',
+ 'publicId': 'test',
+ 'status': {'status': 'ON'},
+ 'ontapClusterProperties': {
+ 'capacityTierInfo': {'tierLevel': 'standard'},
+ 'licenseType': {'capacityLimit': {'size': 10.0, 'unit': 'TB'},
+ 'name': 'Cloud Volumes ONTAP Standard'},
+ 'ontapVersion': '9.10.0.T1',
+ 'writingSpeedState': 'NORMAL'},
+ 'providerProperties': {
+ 'regionName': 'us-west1',
+ 'zoneName': ['us-west1-b'],
+ 'instanceType': 'n1-standard-8',
+ 'labels': {'cloud-ontap-dm': 'anscvogcp-deployment',
+ 'cloud-ontap-version': '9_10_0_t1',
+ 'key1': 'value1',
+ 'platform-serial-number': '90920130000000001020',
+ 'working-environment-id': 'vsaworkingenvironment-cxxt6zwj'},
+ 'subnetCidr': '10.150.0.0/20',
+ 'projectName': 'default-project'},
+ 'svmName': 'svm_Dummyname',
+ 'tenantId': 'Tenant-test',
+ 'workingEnvironmentTyp': 'VSA'
+ }
+ get_property.return_value = cvo_property, None
+ cvo_details = {'cloudProviderName': 'GCP',
+ 'isHA': False,
+ 'name': 'Dummyname',
+ 'ontapClusterProperties': None,
+ 'publicId': 'test',
+ 'status': {'status': 'ON'},
+ 'userTags': {'key1': 'value1'},
+ 'workingEnvironmentType': 'VSA'}
+ get_details.return_value = cvo_details, None
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ for item in modify:
+ if item == 'svm_password':
+ update_svm_password.return_value = True, None
+ elif item == 'gcp_labels':
+ update_cvo_tags.return_value = True, None
+ elif item == 'tier_level':
+ update_tier_level.return_value = True, None
+ elif item == 'instance_type':
+ update_instance_license_type.return_value = True, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_change_cloudmanager_cvo_gcp: %s' % repr(exc.value))
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_writing_speed_state')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_instance_license_type')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_tier_level')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_cvo_tags')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.update_svm_password')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.upgrade_ontap_image')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_property')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ def test_change_cloudmanager_cvo_gcp_ha(self, get_cvo, get_property, get_details, upgrade_ontap_image, update_svm_password,
+ update_cvo_tags, update_tier_level, update_instance_license_type, update_writing_speed_state, get_token):
+ data = self.set_args_create_cloudmanager_cvo_gcp()
+ data['is_ha'] = True
+ data['svm_password'] = 'newpassword'
+ data['update_svm_password'] = True
+ data['ontap_version'] = 'ONTAP-9.10.1P3.T1.gcpha'
+ data['upgrade_ontap_version'] = True
+ data['subnet0_node_and_data_connectivity'] = 'default'
+ data['subnet1_cluster_connectivity'] = 'subnet2'
+ data['subnet2_ha_connectivity'] = 'subnet3'
+ data['subnet3_data_replication'] = 'subnet1'
+ data['vpc0_node_and_data_connectivity'] = 'default'
+ data['vpc1_cluster_connectivity'] = 'vpc2'
+ data['vpc2_ha_connectivity'] = 'vpc3'
+ data['vpc3_data_replication'] = 'vpc1'
+ data['platform_serial_number_node1'] = '12345678'
+ data['platform_serial_number_node2'] = '23456789'
+ data['license_type'] = 'gcp-ha-cot-premium-byol'
+ data['instance_type'] = 'n1-standard-8'
+ set_module_args(data)
+
+ modify = ['svm_password', 'gcp_labels', 'tier_level', 'ontap_version', 'instance_type', 'license_type']
+
+ my_cvo = {
+ 'name': 'TestA',
+ 'publicId': 'test',
+ 'cloudProviderName': 'GCP',
+ 'isHA': True,
+ 'svmName': 'svm_TestA',
+ 'svm_password': 'password',
+ 'tenantId': 'Tenant-test',
+ }
+ get_cvo.return_value = my_cvo, None
+ cvo_property = {'name': 'Dummyname',
+ 'publicId': 'test',
+ 'status': {'status': 'ON'},
+ 'ontapClusterProperties': {
+ 'capacityTierInfo': {'tierLevel': 'standard'},
+ 'licenseType': {'capacityLimit': {'size': 10.0, 'unit': 'TB'},
+ 'name': 'Cloud Volumes ONTAP Standard'},
+ 'ontapVersion': '9.10.0.T1',
+ 'upgradeVersions': [{'autoUpdateAllowed': False,
+ 'imageVersion': 'ONTAP-9.10.1P3',
+ 'lastModified': 1634467078000}],
+ 'writingSpeedState': 'NORMAL'},
+ 'providerProperties': {
+ 'regionName': 'us-west1',
+ 'zoneName': ['us-west1-b'],
+ 'instanceType': 'n1-standard-8',
+ 'labels': {'cloud-ontap-dm': 'anscvogcp-deployment',
+ 'cloud-ontap-version': '9_10_0_t1',
+ 'key1': 'value1',
+ 'platform-serial-number': '90920130000000001020',
+ 'working-environment-id': 'vsaworkingenvironment-cxxt6zwj'},
+ 'subnetCidr': '10.150.0.0/20',
+ 'projectName': 'default-project'},
+ 'svmName': 'svm_Dummyname',
+ 'tenantId': 'Tenant-test',
+ 'workingEnvironmentTyp': 'VSA'
+ }
+ get_property.return_value = cvo_property, None
+ cvo_details = {'cloudProviderName': 'GCP',
+ 'isHA': True,
+ 'name': 'Dummyname',
+ 'ontapClusterProperties': None,
+ 'publicId': 'test',
+ 'status': {'status': 'ON'},
+ 'userTags': {'key1': 'value1', 'partner-platform-serial-number': '90920140000000001019',
+ 'gcp_resource_id': '14004944518802780827', 'count-down': '3'},
+ 'workingEnvironmentType': 'VSA'}
+ get_details.return_value = cvo_details, None
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ for item in modify:
+ if item == 'svm_password':
+ update_svm_password.return_value = True, None
+ elif item == 'gcp_labels':
+ update_cvo_tags.return_value = True, None
+ elif item == 'tier_level':
+ update_tier_level.return_value = True, None
+ elif item == 'ontap_version':
+ upgrade_ontap_image.return_value = True, None
+ elif item == 'writing_speed_state':
+ update_writing_speed_state.return_value = True, None
+ elif item == 'instance_type':
+ update_instance_license_type.return_value = True, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+        print('Info: test_change_cloudmanager_cvo_gcp_ha: %s' % repr(exc.value))
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py
new file mode 100644
index 000000000..9b417ed1b
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_info.py
@@ -0,0 +1,591 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests for Cloudmanager Ansible module: na_cloudmanager_info '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_info \
+ import NetAppCloudmanagerInfo as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+# using pytest natively, without unittest.TestCase
+@pytest.fixture
+def patch_ansible():
+ with patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json) as mocks:
+ yield mocks
+
+
+def set_default_args_pass_check(patch_ansible):
+ return dict({
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+
+def set_args_get_cloudmanager_working_environments_info():
+ args = {
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'gather_subsets': ['working_environments_info']
+ }
+ return args
+
+
+def set_args_get_cloudmanager_aggregates_info():
+ args = {
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'gather_subsets': ['working_environments_info']
+ }
+ return args
+
+
+def set_args_get_accounts_info():
+ args = {
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'myrefresh_token',
+ 'gather_subsets': ['accounts_info']
+ }
+ return args
+
+
+def test_module_fail_when_required_args_missing(patch_ansible):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environments_info')
+def test_get_working_environments_info(working_environments_info, get_token, patch_ansible):
+ args = dict(set_args_get_cloudmanager_working_environments_info())
+ set_module_args(args)
+ get_token.return_value = 'token_type', 'token'
+ working_environments_info.return_value = {
+ "azureVsaWorkingEnvironments": [
+ {
+ "name": "testazure",
+ "cloudProviderName": "Azure",
+ "creatorUserEmail": "samlp|NetAppSAML|testuser",
+ "isHA": False,
+ "publicId": "VsaWorkingEnvironment-az123456",
+ "tenantId": "Tenant-2345",
+ "workingEnvironmentType": "VSA",
+ }
+ ],
+ "gcpVsaWorkingEnvironments": [],
+ "onPremWorkingEnvironments": [],
+ "vsaWorkingEnvironments": [
+ {
+ "name": "testAWS",
+ "cloudProviderName": "Amazon",
+ "creatorUserEmail": "samlp|NetAppSAML|testuser",
+ "isHA": False,
+ "publicId": "VsaWorkingEnvironment-aws12345",
+ "tenantId": "Tenant-2345",
+ "workingEnvironmentType": "VSA",
+ },
+ {
+ "name": "testAWSHA",
+ "cloudProviderName": "Amazon",
+ "creatorUserEmail": "samlp|NetAppSAML|testuser",
+ "isHA": True,
+ "publicId": "VsaWorkingEnvironment-awsha345",
+ "tenantId": "Tenant-2345",
+ "workingEnvironmentType": "VSA",
+ }
+ ]
+ }, None
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_info: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+@patch(
+ 'ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_info.NetAppCloudmanagerInfo.get_aggregates_info')
+def test_get_aggregates_info(aggregates_info, get_token, patch_ansible):
+ args = dict(set_args_get_cloudmanager_aggregates_info())
+ set_module_args(args)
+ get_token.return_value = 'token_type', 'token'
+ aggregates_info.return_value = {
+ "azureVsaWorkingEnvironments": {
+ "VsaWorkingEnvironment-az123456": [
+ {
+ "availableCapacity": {
+ "size": 430.0,
+ "unit": "GB"
+ },
+ "disks": [
+ {
+ "device": "LUN 3.1",
+ "name": "testazure-01-1",
+ "ownerNode": "testazure-01",
+ "position": "data",
+ }
+ ],
+ "encryptionType": "notEncrypted",
+ "homeNode": "testazure-01",
+ "isRoot": False,
+ "name": "aggr1",
+ "ownerNode": "testazure-01",
+ "providerVolumes": [
+ {
+ "device": "1",
+ "diskType": "Premium_LRS",
+ "encrypted": False,
+ "instanceId": "testazureid",
+ "name": "testazuredatadisk1",
+ "size": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "state": "available"
+ }
+ ],
+ "sidlEnabled": False,
+ "snaplockType": "non_snaplock",
+ "state": "online",
+ "totalCapacity": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "usedCapacity": {
+ "size": 70.0,
+ "unit": "GB"
+ },
+ "volumes": [
+ {
+ "isClone": False,
+ "name": "svm_testazure_root",
+ "rootVolume": True,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 1.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.000339508056640625,
+ "unit": "GB"
+ }
+ },
+ {
+ "isClone": False,
+ "name": "azv1",
+ "rootVolume": False,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.0,
+ "unit": "GB"
+ }
+ }
+ ]
+ },
+ ]
+ },
+ "gcpVsaWorkingEnvironments": {},
+ "onPremWorkingEnvironments": {},
+ "vsaWorkingEnvironments": {
+ "VsaWorkingEnvironment-aws12345": [
+ {
+ "availableCapacity": {
+ "size": 430.0,
+ "unit": "GB"
+ },
+ "disks": [
+ {
+ "device": "xvdh vol-381",
+ "name": "testAWSHA-01-i-196h",
+ "ownerNode": "testAWSHA-01",
+ "position": "data",
+ },
+ {
+ "device": "xvdh vol-382",
+ "name": "testAWSHA-01-i-195h",
+ "ownerNode": "testAWSHA-01",
+ "position": "data",
+ }
+ ],
+ "encryptionType": "cloudEncrypted",
+ "homeNode": "testAWSHA-01",
+ "isRoot": False,
+ "name": "aggr1",
+ "ownerNode": "testAWSHA-01",
+ "providerVolumes": [
+ {
+ "device": "/dev/xvdh",
+ "diskType": "gp2",
+ "encrypted": True,
+ "id": "vol-381",
+ "instanceId": "i-196",
+ "name": "vol-381",
+ "size": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "state": "in-use"
+ },
+ {
+ "device": "/dev/xvdh",
+ "diskType": "gp2",
+ "encrypted": True,
+ "id": "vol-382",
+ "instanceId": "i-195",
+ "name": "vol-382",
+ "size": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "state": "in-use"
+ }
+ ],
+ "sidlEnabled": True,
+ "snaplockType": "non_snaplock",
+ "state": "online",
+ "totalCapacity": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "usedCapacity": {
+ "size": 70.0,
+ "unit": "GB"
+ },
+ "volumes": [
+ {
+ "isClone": False,
+ "name": "svm_testAWSHA_root",
+ "rootVolume": True,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 1.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.000339508056640625,
+ "unit": "GB"
+ }
+ },
+ {
+ "isClone": False,
+ "name": "vha",
+ "rootVolume": False,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 100.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.0,
+ "unit": "GB"
+ }
+ }
+ ]
+ }
+ ],
+ "VsaWorkingEnvironment-awsha345": [
+ {
+ "availableCapacity": {
+ "size": 430.0,
+ "unit": "GB"
+ },
+ "disks": [
+ {
+ "device": "xvdg vol-369",
+ "name": "testAWS-01-i-190g",
+ "ownerNode": "testAWS-01",
+ "position": "data",
+ }
+ ],
+ "encryptionType": "cloudEncrypted",
+ "homeNode": "testAWS-01",
+ "isRoot": False,
+ "name": "aggr1",
+ "ownerNode": "testAWS-01",
+ "providerVolumes": [
+ {
+ "device": "/dev/xvdg",
+ "diskType": "gp2",
+ "encrypted": True,
+ "id": "vol-369",
+ "instanceId": "i-190",
+ "name": "vol-369",
+ "size": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "state": "in-use"
+ }
+ ],
+ "sidlEnabled": True,
+ "snaplockType": "non_snaplock",
+ "state": "online",
+ "totalCapacity": {
+ "size": 500.0,
+ "unit": "GB"
+ },
+ "usedCapacity": {
+ "size": 70.0,
+ "unit": "GB"
+ },
+ "volumes": [
+ {
+ "isClone": False,
+ "name": "svm_testAWS_root",
+ "rootVolume": True,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 1.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.000339508056640625,
+ "unit": "GB"
+ }
+ },
+ {
+ "isClone": False,
+ "name": "v1",
+ "rootVolume": False,
+ "thinProvisioned": True,
+ "totalSize": {
+ "size": 100.0,
+ "unit": "GB"
+ },
+ "usedSize": {
+ "size": 0.0,
+ "unit": "GB"
+ }
+ }
+ ]
+ },
+ {
+ "availableCapacity": {
+ "size": 86.0,
+ "unit": "GB"
+ },
+ "disks": [
+ {
+ "device": "xvdh vol-371",
+ "name": "testAWS-01-i-190h",
+ "ownerNode": "testAWS-01",
+ "position": "data",
+ }
+ ],
+ "encryptionType": "cloudEncrypted",
+ "homeNode": "testAWS-01",
+ "isRoot": False,
+ "name": "aggr2",
+ "ownerNode": "testAWS-01",
+ "providerVolumes": [
+ {
+ "device": "/dev/xvdh",
+ "diskType": "gp2",
+ "encrypted": True,
+ "id": "vol-371",
+ "instanceId": "i-190",
+ "name": "vol-371",
+ "size": {
+ "size": 100.0,
+ "unit": "GB"
+ },
+ "state": "in-use"
+ }
+ ],
+ "sidlEnabled": True,
+ "snaplockType": "non_snaplock",
+ "state": "online",
+ "totalCapacity": {
+ "size": 100.0,
+ "unit": "GB"
+ },
+ "usedCapacity": {
+ "size": 0.0,
+ "unit": "GB"
+ },
+ "volumes": []
+ }
+ ]
+ }
+ }
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_info: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
+
+
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+@patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_accounts_info')
+def test_get_accounts_info(accounts_info, get_token, patch_ansible):
+ args = dict(set_args_get_accounts_info())
+ set_module_args(args)
+ get_token.return_value = 'token_type', 'token'
+ accounts_info.return_value = {
+ "awsAccounts": [
+ {
+ "accessKey": "1",
+ "accountId": "123456789011",
+ "accountName": "tami",
+ "accountType": "AWS_KEYS",
+ "publicId": "CloudProviderAccount-Ekj6L9QX",
+ "subscriptionId": "hackExp10Days",
+ "vsaList": []
+ },
+ {
+ "accessKey": "",
+ "accountId": "123456789011",
+ "accountName": "Instance Profile",
+ "accountType": "INSTANCE_PROFILE",
+ "occmRole": "occmRole",
+ "publicId": "InstanceProfile",
+ "subscriptionId": "hackExp10Days",
+ "vsaList": [
+ {
+ "name": "CVO_AWSCluster",
+ "publicId": "VsaWorkingEnvironment-9m3I6i3I",
+ "workingEnvironmentType": "AWS"
+ },
+ {
+ "name": "testAWS1",
+ "publicId": "VsaWorkingEnvironment-JCzkA9OX",
+ "workingEnvironmentType": "AWS"
+ },
+ ]
+ }
+ ],
+ "azureAccounts": [
+ {
+ "accountName": "AzureKeys",
+ "accountType": "AZURE_KEYS",
+ "applicationId": "1",
+ "publicId": "CloudProviderAccount-T84ceMYu",
+ "tenantId": "1",
+ "vsaList": [
+ {
+ "name": "testAZURE",
+ "publicId": "VsaWorkingEnvironment-jI0tbceH",
+ "workingEnvironmentType": "AZURE"
+ },
+ {
+ "name": "test",
+ "publicId": "VsaWorkingEnvironment-00EnDcfB",
+ "workingEnvironmentType": "AZURE"
+ },
+ ]
+ },
+ {
+ "accountName": "s",
+ "accountType": "AZURE_KEYS",
+ "applicationId": "1",
+ "publicId": "CloudProviderAccount-XxbN95dj",
+ "tenantId": "1",
+ "vsaList": []
+ }
+ ],
+ "gcpStorageAccounts": [],
+ "nssAccounts": [
+ {
+ "accountName": "TESTCLOUD2",
+ "accountType": "NSS_KEYS",
+ "nssUserName": "TESTCLOUD2",
+ "publicId": "be2f3cac-352a-46b9-a341-a446c35b61c9",
+ "vsaList": [
+ {
+ "name": "testAWS",
+ "publicId": "VsaWorkingEnvironment-3txYJOsX",
+ "workingEnvironmentType": "AWS"
+ },
+ {
+ "name": "testAZURE",
+ "publicId": "VsaWorkingEnvironment-jI0tbceH",
+ "workingEnvironmentType": "AZURE"
+ },
+ ]
+ },
+ {
+ "accountName": "ntapitdemo",
+ "accountType": "NSS_KEYS",
+ "nssUserName": "ntapitdemo",
+ "publicId": "01e43a7d-cfc9-4682-aa12-15374ce81638",
+ "vsaList": [
+ {
+ "name": "test",
+ "publicId": "VsaWorkingEnvironment-00EnDcfB",
+ "workingEnvironmentType": "AZURE"
+ }
+ ]
+ }
+ ]
+ }, None
+ my_obj = my_module()
+ my_obj.rest_api.api_root_path = "my_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_info: %s' % repr(exc.value))
+ assert not exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py
new file mode 100644
index 000000000..a9f41beed
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_nss_account.py
@@ -0,0 +1,144 @@
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account \
+ import NetAppCloudmanagerNssAccount as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'test_nss_account',
+ 'username': 'username',
+ 'password': 'password',
+ 'client_id': 'client_id',
+ 'refresh_token': 'refrsh_token'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.create_nss_account')
+ def test_create_nss_account_successfully(self, create, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = None
+ create.return_value = None
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.create_nss_account')
+ def test_create_nss_account_idempotency(self, create, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = {
+ 'name': 'test_nss_account',
+ 'username': 'TESTCLOUD1',
+ 'password': 'test_test',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'CvMJXRhz5V4dmxZqVg5LDRDlZyE - kbqRKT9YMcAsjmwFs'
+ }
+ create.return_value = None
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.get_nss_account')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_nss_account.NetAppCloudmanagerNssAccount.delete_nss_account')
+    def test_delete_nss_account_successfully(self, delete, get, get_token):
+ args = self.set_default_args_pass_check()
+ args['state'] = 'absent'
+ set_module_args(args)
+ get.return_value = {
+ 'name': 'test_nss_account',
+ 'username': 'TESTCLOUD1',
+ 'password': 'test_test',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'refresh_token': 'CvMJXRhz5V4dmxZqVg5LDRDlZyE - kbqRKT9YMcAsjmwFs'
+ }
+ delete.return_value = None
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py
new file mode 100644
index 000000000..9d1189489
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_snapmirror.py
@@ -0,0 +1,176 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror \
+ import NetAppCloudmanagerSnapmirror as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'source_working_environment_name': 'TestA',
+ 'destination_working_environment_name': 'TestB',
+ 'source_volume_name': 'source',
+ 'destination_volume_name': 'dest',
+ 'source_svm_name': 'source_svm',
+ 'destination_svm_name': 'dest_svm',
+ 'policy': 'MirrorAllSnapshots',
+ 'schedule': 'min',
+ 'max_transfer_rate': 102400,
+ 'client_id': 'client_id',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_create_cloudmanager_snapmirror(self):
+ return dict({
+ 'state': 'present',
+ 'source_working_environment_name': 'TestA',
+ 'destination_working_environment_name': 'TestB',
+ 'source_volume_name': 'source',
+ 'destination_volume_name': 'dest',
+ 'source_svm_name': 'source_svm',
+ 'destination_svm_name': 'dest_svm',
+ 'policy': 'MirrorAllSnapshots',
+ 'schedule': 'min',
+ 'max_transfer_rate': 102400,
+ 'client_id': 'client_id',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def set_args_delete_cloudmanager_snapmirror(self):
+ return dict({
+ 'state': 'absent',
+ 'source_working_environment_name': 'TestA',
+ 'destination_working_environment_name': 'TestB',
+ 'source_volume_name': 'source',
+ 'destination_volume_name': 'dest',
+ 'client_id': 'client_id',
+ 'refresh_token': 'myrefresh_token',
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ def test_module_fail_when_required_args_present(self, get_token):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleExitJson) as exc:
+ set_module_args(self.set_default_args_pass_check())
+ get_token.return_value = 'test', 'test'
+ my_module()
+ exit_json(changed=True, msg="TestCase Fail when required args are present")
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_detail_for_snapmirror')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.wait_on_completion')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_snapmirror')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.build_quote_request')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.quote_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_volumes')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_interclusterlifs')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_cloudmanager_snapmirror_create_pass(self, send_request, get_interclusterlifs, get_volumes, quote_volume, build_quote_request,
+ get_snapmirror, wait_on_completion, get_working_environment_detail_for_snapmirror, get_token):
+ set_module_args(self.set_args_create_cloudmanager_snapmirror())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ response = {'id': 'abcdefg12345'}
+ source_we_info = {'publicId': 'test1', 'workingEnvironmentType': 'AMAZON'}
+ dest_we_info = {'publicId': 'test2', 'workingEnvironmentType': 'AMAZON', 'svmName': 'source_svm', 'name': 'TestB'}
+ source_vol = [{'name': 'source', 'svmName': 'source_svm', 'providerVolumeType': 'abc'}]
+ quote_volume_response = {'numOfDisks': 10, 'aggregateName': 'aggr1'}
+ interclusterlifs_resp = {'interClusterLifs': [{'address': '10.10.10.10'}], 'peerInterClusterLifs': [{'address': '10.10.10.10'}]}
+ get_working_environment_detail_for_snapmirror.return_value = source_we_info, dest_we_info, None
+ send_request.return_value = response, None, None
+ wait_on_completion.return_value = None
+ get_snapmirror.return_value = None
+ get_volumes.return_value = source_vol
+ build_quote_request.return_value = {'name': 'test'}
+ quote_volume.return_value = quote_volume_response
+ get_interclusterlifs.return_value = interclusterlifs_resp
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_create_cloudmanager_snapmirror_create_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_snapmirror.NetAppCloudmanagerSnapmirror.get_snapmirror')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_delete_cloudmanager_snapmirror_delete_pass(self, send_request, get_snapmirror, get_token):
+ set_module_args(self.set_args_delete_cloudmanager_snapmirror())
+ get_token.return_value = 'test', 'test'
+ my_obj = my_module()
+
+ my_snapmirror = {
+ 'source_working_environment_id': '456',
+ 'destination_svm_name': 'dest_svm',
+ 'destination_working_environment_id': '123'}
+ get_snapmirror.return_value = my_snapmirror
+ send_request.return_value = None, None, None
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ my_obj.apply()
+ print('Info: test_delete_cloudmanager_snapmirror_delete_pass: %s' % repr(exc.value))
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py
new file mode 100644
index 000000000..15b4802df
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/plugins/modules/test_na_cloudmanager_volume.py
@@ -0,0 +1,216 @@
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+''' unit tests Cloudmanager Ansible module: '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import json
+import sys
+import pytest
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.netapp.cloudmanager.tests.unit.compat import unittest
+from ansible_collections.netapp.cloudmanager.tests.unit.compat.mock import patch, Mock
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume \
+ import NetAppCloudmanagerVolume as my_module
+
+if not netapp_utils.HAS_REQUESTS and sys.version_info < (3, 5):
+ pytestmark = pytest.mark.skip('skipping as missing required imports on 2.6 and 2.7')
+
+
+def set_module_args(args):
+ '''prepare arguments so that they will be picked up during module creation'''
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ '''Exception class to be raised by module.exit_json and caught by the test case'''
+
+
+class AnsibleFailJson(Exception):
+ '''Exception class to be raised by module.fail_json and caught by the test case'''
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over exit_json; package return data into an exception'''
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ '''function to patch over fail_json; package return data into an exception'''
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class MockCMConnection():
+ ''' Mock response of http connections '''
+ def __init__(self, kind=None, parm1=None):
+ self.type = kind
+ self.parm1 = parm1
+ self.xml_in = None
+ self.xml_out = None
+
+
+class TestMyModule(unittest.TestCase):
+ ''' a group of related Unit Tests '''
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'testvol',
+ 'working_environment_id': 'VsaWorkingEnvironment-abcdefg12345',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'svm_name': 'svm_justinaws',
+ 'snapshot_policy_name': 'default',
+ 'tiering_policy': 'auto',
+ 'export_policy_type': 'custom',
+ 'export_policy_ip': ["10.30.0.1/16"],
+ 'export_policy_nfs_version': ["nfs4"],
+ 'refresh_token': 'myrefresh_token',
+ 'size': 10,
+ })
+
+ def set_default_args_with_workingenv_name_pass_check(self):
+ return dict({
+ 'state': 'present',
+ 'name': 'testvol',
+ 'working_environment_name': 'weone',
+ 'client_id': 'Nw4Q2O1kdnLtvhwegGalFnodEHUfPJWh',
+ 'svm_name': 'svm_justinaws',
+ 'snapshot_policy_name': 'default',
+ 'export_policy_type': 'custom',
+ 'export_policy_ip': ["10.30.0.1/16"],
+ 'export_policy_nfs_version': ["nfs4"],
+ 'refresh_token': 'myrefresh_token',
+ 'size': 10,
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ ''' required arguments are reported as errors '''
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ my_module()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.create_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_volume_successfully(self, send_request, create, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = None
+ create.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_volume_idempotency(self, send_request, get, get_token):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = {
+ 'name': 'testvol',
+ 'snapshot_policy_name': 'default',
+ 'export_policy_type': 'custom',
+ 'export_policy_ip': ["10.30.0.1/16"],
+ 'export_policy_nfs_version': ["nfs4"],
+ }
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.modify_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_update_volume_successfully(self, send_request, get, get_token, modify):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = {
+ 'name': 'testvol',
+ 'snapshot_policy_name': 'default',
+ 'tiering_policy': 'snapshot_only',
+ 'export_policy_type': 'custom',
+ 'export_policy_ip': ["10.30.0.1/16"],
+ 'export_policy_nfs_version': ["nfs3", "nfs4"],
+ }
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)]
+ get_token.return_value = ("type", "token")
+ modify.return_value = None
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.modify_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_update_volume_idempotency(self, send_request, get, get_token, modify):
+ set_module_args(self.set_default_args_pass_check())
+ get.return_value = {
+ 'name': 'testvol',
+ 'snapshot_policy_name': 'default',
+ 'export_policy_type': 'custom',
+ 'export_policy_ip': ["10.30.0.1/16"],
+ 'export_policy_nfs_version': ["nfs4"],
+ }
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)]
+ get_token.return_value = ("type", "token")
+ modify.return_value = None
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert not exc.value.args[0]['changed']
+
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.get_token')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module.NetAppModule.get_working_environment_details_by_name')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.get_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.modules.na_cloudmanager_volume.NetAppCloudmanagerVolume.create_volume')
+ @patch('ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp.CloudManagerRestAPI.send_request')
+ def test_create_volume_by_workingenv_name_successfully(self, send_request, create, get, get_we, get_token):
+ args = self.set_default_args_with_workingenv_name_pass_check()
+ my_we = {
+ 'name': 'test',
+ 'publicId': 'test',
+ 'cloudProviderName': 'Amazon'}
+ get_we.return_value = my_we, None
+ args['working_environment_id'] = my_we['publicId']
+ set_module_args(args)
+ get.return_value = None
+ create.return_value = None
+ send_request.side_effect = [({'publicId': 'id', 'svmName': 'svm_name', 'cloudProviderName': "aws", 'isHA': False}, None, None)]
+ get_token.return_value = ("type", "token")
+ obj = my_module()
+ obj.rest_api.api_root_path = "test_root_path"
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj.apply()
+ assert exc.value.args[0]['changed']
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt b/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt
new file mode 100644
index 000000000..484081daa
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/requirements-azure.txt
@@ -0,0 +1 @@
+cryptography>=3.2.0 ; python_version >= '3.5' \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt b/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt
new file mode 100644
index 000000000..88c25079f
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/tests/unit/requirements.txt
@@ -0,0 +1,10 @@
+boto3 ; python_version >= '3.5'
+botocore ; python_version >= '3.5'
+azure-mgmt-compute ; python_version >= '3.5'
+azure-mgmt-network ; python_version >= '3.5'
+azure-mgmt-storage ; python_version >= '3.5'
+azure-mgmt-resource ; python_version >= '3.5'
+azure-cli-core ; python_version >= '3.5'
+msrestazure ; python_version >= '3.5'
+azure-common ; python_version >= '3.5'
+google-auth ; python_version >= '3.5'