summaryrefslogtreecommitdiffstats
path: root/ansible_collections/netapp/cloudmanager/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
commit975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/netapp/cloudmanager/plugins
parentInitial commit. (diff)
downloadansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg.upstream/7.7.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/netapp/cloudmanager/plugins')
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/README.md31
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py48
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py332
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py1381
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py332
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py458
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py265
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py655
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py591
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py644
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py855
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py746
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py858
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py235
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py192
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py471
-rw-r--r--ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py660
17 files changed, 8754 insertions, 0 deletions
diff --git a/ansible_collections/netapp/cloudmanager/plugins/README.md b/ansible_collections/netapp/cloudmanager/plugins/README.md
new file mode 100644
index 000000000..6541cf7cf
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/README.md
@@ -0,0 +1,31 @@
+# Collections Plugins Directory
+
+This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
+is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
+would contain module utils and modules respectively.
+
+Here is an example directory of the majority of plugins currently supported by Ansible:
+
+```
+└── plugins
+ ├── action
+ ├── become
+ ├── cache
+ ├── callback
+ ├── cliconf
+ ├── connection
+ ├── filter
+ ├── httpapi
+ ├── inventory
+ ├── lookup
+ ├── module_utils
+ ├── modules
+ ├── netconf
+ ├── shell
+ ├── strategy
+ ├── terminal
+ ├── test
+ └── vars
+```
+
+A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html). \ No newline at end of file
diff --git a/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py
new file mode 100644
index 000000000..76807bb1c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/doc_fragments/netapp.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Reusable documentation fragments for the netapp.cloudmanager collection.

    Modules include these via ``extends_documentation_fragment: netapp.cloudmanager.netapp.cloudmanager``.
    Only documentation lives here; the matching runtime argument spec is built by
    ``module_utils.netapp.cloudmanager_host_argument_spec``.
    """

    # Documentation fragment for CLOUDMANAGER: shared authentication and
    # environment options common to all na_cloudmanager modules.
    CLOUDMANAGER = """
options:
  refresh_token:
    type: str
    description:
    - The refresh token for NetApp Cloud Manager API operations.

  sa_secret_key:
    type: str
    description:
    - The service account secret key for NetApp Cloud Manager API operations.

  sa_client_id:
    type: str
    description:
    - The service account secret client ID for NetApp Cloud Manager API operations.

  environment:
    type: str
    description:
    - The environment for NetApp Cloud Manager API operations.
    default: prod
    choices: ['prod', 'stage']
    version_added: 21.8.0

  feature_flags:
    description:
    - Enable or disable a new feature.
    - This can be used to enable an experimental feature or disable a new feature that breaks backward compatibility.
    - Supported keys and values are subject to change without notice. Unknown keys are ignored.
    type: dict
    version_added: 21.11.0
notes:
  - The modules prefixed with na_cloudmanager are built to manage CloudManager and CVO deployments in AWS/GCP/Azure clouds.
  - If sa_client_id and sa_secret_key are provided, service account will be used in operations. refresh_token will be ignored.
"""
diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py
new file mode 100644
index 000000000..eaecc8f00
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp.py
@@ -0,0 +1,332 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017-2021, NetApp Ansible Team <ng-ansibleteam@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+netapp.py: wrapper around send_requests and other utilities
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import time
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
# Version of this collection, reported in logs/telemetry by the modules.
COLLECTION_VERSION = "21.22.0"
# Endpoints and image-lookup constants for the production Cloud Manager service.
PROD_ENVIRONMENT = {
    'CLOUD_MANAGER_HOST': 'cloudmanager.cloud.netapp.com',
    'AUTH0_DOMAIN': 'netapp-cloud-account.auth0.com',
    'SA_AUTH_HOST': 'cloudmanager.cloud.netapp.com/auth/oauth/token',
    'AUTH0_CLIENT': 'Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC',
    'AMI_FILTER': 'Setup-As-Service-AMI-Prod*',
    'AWS_ACCOUNT': '952013314444',
    'GCP_IMAGE_PROJECT': 'netapp-cloudmanager',
    'GCP_IMAGE_FAMILY': 'cloudmanager',
    'CVS_HOST_NAME': 'https://api.services.cloud.netapp.com'
}
# Same keys as PROD_ENVIRONMENT, pointing at the staging service (selected via
# the 'environment' module option).
STAGE_ENVIRONMENT = {
    'CLOUD_MANAGER_HOST': 'staging.cloudmanager.cloud.netapp.com',
    'AUTH0_DOMAIN': 'staging-netapp-cloud-account.auth0.com',
    'SA_AUTH_HOST': 'staging.cloudmanager.cloud.netapp.com/auth/oauth/token',
    'AUTH0_CLIENT': 'O6AHa7kedZfzHaxN80dnrIcuPBGEUvEv',
    'AMI_FILTER': 'Setup-As-Service-AMI-*',
    'AWS_ACCOUNT': '282316784512',
    'GCP_IMAGE_PROJECT': 'tlv-automation',
    'GCP_IMAGE_FAMILY': 'occm-automation',
    'CVS_HOST_NAME': 'https://staging.api.services.cloud.netapp.com'
}

# 'requests' is an optional dependency: its absence is reported at module start
# through CloudManagerRestAPI.check_required_library(), not at import time.
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False


# Multipliers to convert size units to bytes (binary powers: 1 kb == 1024).
POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    kb=1024,
    mb=1024 ** 2,
    gb=1024 ** 3,
    tb=1024 ** 4,
    pb=1024 ** 5,
    eb=1024 ** 6,
    zb=1024 ** 7,
    yb=1024 ** 8
)


# Module-level logger; only emits if the 'trace_apis' feature flag configures
# logging.basicConfig with LOG_FILE below.
LOG = logging.getLogger(__name__)
LOG_FILE = '/tmp/cloudmanager_apis.log'
+
+
def cloudmanager_host_argument_spec():
    """Return the AnsibleModule argument spec shared by all cloudmanager modules.

    Mirrors the CLOUDMANAGER doc fragment: three secret credential options,
    the prod/stage environment selector, and the free-form feature_flags dict.
    """
    # all credential options share the same (str, no_log) shape
    spec = {
        name: dict(required=False, type='str', no_log=True)
        for name in ('refresh_token', 'sa_client_id', 'sa_secret_key')
    }
    spec['environment'] = dict(required=False, type='str', choices=['prod', 'stage'], default='prod')
    spec['feature_flags'] = dict(required=False, type='dict')
    return spec
+
+
def has_feature(module, feature_name):
    """Return the boolean value of a feature flag.

    Fails the module (fail_json) if the configured value is not a bool.
    """
    flag_value = get_feature(module, feature_name)
    if not isinstance(flag_value, bool):
        # fail_json raises/exits, so non-boolean flags never fall through
        module.fail_json(msg="Error: expected bool type for feature flag: %s, found %s" % (feature_name, type(flag_value)))
        return None
    return flag_value
+
+
def get_feature(module, feature_name):
    """Return a feature flag value, preferring user configuration over built-in defaults.

    Fails the module for a flag name that is neither user-configured nor known.
    """
    default_flags = {
        'trace_apis': False,      # if True, append REST requests/responses to /tmp/cloudmanager_apis.log
        'trace_headers': False,   # if True, and if trace_apis is True, include <large> headers in trace
        'show_modified': True,
        'simulator': False,       # if True, it is running on simulator
    }

    user_flags = module.params['feature_flags']
    if user_flags is not None and feature_name in user_flags:
        return user_flags[feature_name]
    if feature_name in default_flags:
        return default_flags[feature_name]
    module.fail_json(msg="Internal error: unexpected feature flag: %s" % feature_name)
+
+
class CloudManagerRestAPI(object):
    """ wrapper around send_request """
    def __init__(self, module, timeout=60):
        """Read credentials and environment from module params and fetch an OAuth token.

        :param module: AnsibleModule; params must include refresh_token, sa_client_id,
            sa_secret_key, environment and feature_flags.
        :param timeout: per-request timeout in seconds, passed to requests.
        """
        self.module = module
        self.timeout = timeout
        self.refresh_token = self.module.params['refresh_token']
        self.sa_client_id = self.module.params['sa_client_id']
        self.sa_secret_key = self.module.params['sa_secret_key']
        self.environment = self.module.params['environment']
        # environment is restricted to prod/stage by the argument spec choices
        if self.environment == 'prod':
            self.environment_data = PROD_ENVIRONMENT
        elif self.environment == 'stage':
            self.environment_data = STAGE_ENVIRONMENT
        self.url = 'https://'
        self.api_root_path = None
        self.check_required_library()
        if has_feature(module, 'trace_apis'):
            # route DEBUG records (request/response traces) to LOG_FILE
            logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
        self.log_headers = has_feature(module, 'trace_headers')  # requires trace_apis to do anything
        self.simulator = has_feature(module, 'simulator')
        # fails the module if neither service-account nor refresh-token credentials work
        self.token_type, self.token = self.get_token()

    def check_required_library(self):
        """Fail the module if the optional 'requests' package is not installed."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'))

    def format_client_id(self, client_id):
        """Return client_id with the 'clients' suffix appended if not already present."""
        return client_id if client_id.endswith('clients') else client_id + 'clients'

    def build_url(self, api):
        """Return a full URL for api: absolute URLs pass through, relative paths get the CM host."""
        # most requests are sent to Cloud Manager, but for connectors we need to manage VM instances using AWS, Azure, or GCP APIs
        if api.startswith('http'):
            return api
        # add host if API starts with / and host is not already included in self.url
        prefix = self.environment_data['CLOUD_MANAGER_HOST'] if self.environment_data['CLOUD_MANAGER_HOST'] not in self.url and api.startswith('/') else ''
        return self.url + prefix + api

    def send_request(self, method, api, params, json=None, data=None, header=None, authorized=True):
        ''' send http request and process response, including error conditions

        Returns (json_dict, error_details, on_cloud_request_id).
        Retries up to 3 times (5s apart) on "Max retries exceeded" connection errors.
        '''
        url = self.build_url(api)
        headers = {
            'Content-type': "application/json",
            'Referer': "Ansible_NetApp",
        }
        if authorized:
            headers['Authorization'] = self.token_type + " " + self.token
        if header is not None:
            # caller-supplied headers override the defaults above
            headers.update(header)
        for __ in range(3):
            json_dict, error_details, on_cloud_request_id = self._send_request(method, url, params, json, data, headers)
            # we observe this error with DELETE on agents-mgmt/agent (and sometimes on GET)
            if error_details is not None and 'Max retries exceeded with url:' in error_details:
                time.sleep(5)
            else:
                break
        return json_dict, error_details, on_cloud_request_id

    def _send_request(self, method, url, params, json, data, headers):
        """Perform one HTTP request; returns (json_dict, error_details, on_cloud_request_id)."""
        json_dict = None
        json_error = None
        error_details = None
        on_cloud_request_id = None
        response = None
        status_code = None

        def get_json(response):
            ''' extract json, and error message if present '''
            error = None
            try:
                # shadows the outer 'json' request-payload argument on purpose
                json = response.json()
            except ValueError:
                # body is not JSON; treat as empty rather than an error
                return None, None
            success_code = [200, 201, 202]
            if response.status_code not in success_code:
                error = json.get('message')
                self.log_error(response.status_code, 'HTTP error: %s' % error)
            return json, error

        self.log_request(method=method, url=url, params=params, json=json, data=data, headers=headers)
        try:
            response = requests.request(method, url, headers=headers, timeout=self.timeout, params=params, json=json, data=data)
            status_code = response.status_code
            if status_code >= 300 or status_code < 200:
                # non-2xx: return the raw body and the status code as the error string
                self.log_error(status_code, 'HTTP status code error: %s' % response.content)
                return response.content, str(status_code), on_cloud_request_id
            # If the response was successful, no Exception will be raised
            json_dict, json_error = get_json(response)
            # async operations report their task id in this response header
            if response.headers.get('OnCloud-Request-Id', '') != '':
                on_cloud_request_id = response.headers.get('OnCloud-Request-Id')
        except requests.exceptions.HTTPError as err:
            self.log_error(status_code, 'HTTP error: %s' % err)
            error_details = str(err)
        except requests.exceptions.ConnectionError as err:
            self.log_error(status_code, 'Connection error: %s' % err)
            error_details = str(err)
        except Exception as err:
            self.log_error(status_code, 'Other error: %s' % err)
            error_details = str(err)
        if json_error is not None:
            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
            error_details = json_error
        # NOTE(review): requests.Response is falsy for 4xx/5xx, but those were
        # returned above, so this only logs successful (2xx) bodies — confirm intended.
        if response:
            self.log_debug(status_code, response.content)
        return json_dict, error_details, on_cloud_request_id

    # If an error was reported in the json payload, it is handled below
    def get(self, api, params=None, header=None):
        """Send a GET request; returns (json_dict, error_details, on_cloud_request_id)."""
        method = 'GET'
        return self.send_request(method=method, api=api, params=params, json=None, header=header)

    def post(self, api, data, params=None, header=None, gcp_type=False, authorized=True):
        """Send a POST; gcp_type sends data as a raw body, otherwise as JSON."""
        method = 'POST'
        if gcp_type:
            return self.send_request(method=method, api=api, params=params, data=data, header=header)
        else:
            return self.send_request(method=method, api=api, params=params, json=data, header=header, authorized=authorized)

    def patch(self, api, data, params=None, header=None):
        """Send a PATCH request with a JSON body."""
        method = 'PATCH'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def put(self, api, data, params=None, header=None):
        """Send a PUT request with a JSON body."""
        method = 'PUT'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def delete(self, api, data, params=None, header=None):
        """Send a DELETE request with a JSON body."""
        method = 'DELETE'
        return self.send_request(method=method, api=api, params=params, json=data, header=header)

    def get_token(self):
        """Acquire an OAuth token, preferring service-account credentials over refresh_token.

        Returns (token_type, token); fails the module if credentials are missing or rejected.
        """
        if self.sa_client_id is not None and self.sa_client_id != "" and self.sa_secret_key is not None and self.sa_secret_key != "":
            response, error, ocr_id = self.post(self.environment_data['SA_AUTH_HOST'],
                                                data={"grant_type": "client_credentials", "client_secret": self.sa_secret_key,
                                                      "client_id": self.sa_client_id, "audience": "https://api.cloud.netapp.com"},
                                                authorized=False)
        elif self.refresh_token is not None and self.refresh_token != "":
            response, error, ocr_id = self.post(self.environment_data['AUTH0_DOMAIN'] + '/oauth/token',
                                                data={"grant_type": "refresh_token", "refresh_token": self.refresh_token,
                                                      "client_id": self.environment_data['AUTH0_CLIENT'],
                                                      "audience": "https://api.cloud.netapp.com"},
                                                authorized=False)
        else:
            self.module.fail_json(msg='Missing refresh_token or sa_client_id and sa_secret_key')

        if error:
            self.module.fail_json(msg='Error acquiring token: %s, %s' % (str(error), str(response)))
        token = response['access_token']
        token_type = response['token_type']

        return token_type, token

    def wait_on_completion(self, api_url, action_name, task, retries, wait_interval):
        """Poll api_url until the task succeeds, fails, or retries are exhausted.

        Returns None on success, or an error string on failure/timeout.
        """
        while True:
            cvo_status, failure_error_message, error = self.check_task_status(api_url)
            if error is not None:
                return error
            # status: -1 failed, 1 succeeded, 0 still pending
            if cvo_status == -1:
                return 'Failed to %s %s, error: %s' % (task, action_name, failure_error_message)
            elif cvo_status == 1:
                return None  # success
            # status value 0 means pending
            if retries == 0:
                return 'Taking too long for %s to %s or not properly setup' % (action_name, task)
            time.sleep(wait_interval)
            retries = retries - 1

    def check_task_status(self, api_url):
        """Fetch a task's status; returns (status, error_message, request_error).

        Transient GET failures are retried up to 3 times, 1 second apart.
        """
        headers = {
            'X-Agent-Id': self.format_client_id(self.module.params['client_id'])
        }

        network_retries = 3
        while True:
            result, error, dummy = self.get(api_url, None, header=headers)
            if error is not None:
                if network_retries <= 0:
                    return 0, '', error
                time.sleep(1)
                network_retries -= 1
            else:
                response = result
                break
        return response['status'], response['error'], None

    def log_error(self, status_code, message):
        """Log an error-level record tagged with the HTTP status code."""
        LOG.error("%s: %s", status_code, message)

    def log_debug(self, status_code, content):
        """Log a debug-level record tagged with the HTTP status code."""
        LOG.debug("%s: %s", status_code, content)

    def log_request(self, method, params, url, json, data, headers):
        """Trace an outgoing request; headers included only when trace_headers is set."""
        contents = {
            'method': method,
            'url': url,
            'json': json,
            'data': data
        }
        if params:
            contents['params'] = params
        if self.log_headers:
            # headers may contain the Authorization token; only traced on request
            contents['headers'] = headers
        self.log_debug('sending', repr(contents))
diff --git a/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py
new file mode 100644
index 000000000..aa73f205a
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/module_utils/netapp_module.py
@@ -0,0 +1,1381 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2022, Laurent Nicolas <laurentn@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+''' Support class for NetApp ansible modules '''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+import json
+import re
+import base64
+import time
+
+
def cmp(a, b):
    '''
    Python 3 does not have a cmp function, this will do the cmp.
    Strings (and string elements of lists) are compared case-insensitively,
    and lists are compared as sorted copies.
    :param a: first object to check
    :param b: second object to check
    :return: -1 if a < b, 0 if equal, 1 if a > b; None sorts first.
    '''
    # None sorts before anything else.  The b-is-None guard is a bug fix: the
    # original fell through to 'a > b' which raises TypeError on Python 3.
    if a is None:
        return -1
    if b is None:
        return 1
    # convert to lower case for string comparison.
    if isinstance(a, str) and isinstance(b, str):
        a = a.lower()
        b = b.lower()
    # if list has string element, convert string to lower case.
    if isinstance(a, list) and isinstance(b, list):
        a = [x.lower() if isinstance(x, str) else x for x in a]
        b = [x.lower() if isinstance(x, str) else x for x in b]
        a.sort()
        b.sort()
    return (a > b) - (a < b)
+
+
+class NetAppModule(object):
+ '''
+ Common class for NetApp modules
+ set of support functions to derive actions based
+ on the current state of the system, and a desired state
+ '''
+
+ def __init__(self):
+ self.log = []
+ self.changed = False
+ self.parameters = {'name': 'not intialized'}
+
+ def set_parameters(self, ansible_params):
+ self.parameters = {}
+ for param in ansible_params:
+ if ansible_params[param] is not None:
+ self.parameters[param] = ansible_params[param]
+ return self.parameters
+
+ def get_cd_action(self, current, desired):
+ ''' takes a desired state and a current state, and return an action:
+ create, delete, None
+ eg:
+ is_present = 'absent'
+ some_object = self.get_object(source)
+ if some_object is not None:
+ is_present = 'present'
+ action = cd_action(current=is_present, desired = self.desired.state())
+ '''
+ desired_state = desired['state'] if 'state' in desired else 'present'
+ if current is None and desired_state == 'absent':
+ return None
+ if current is not None and desired_state == 'present':
+ return None
+ # change in state
+ self.changed = True
+ if current is not None:
+ return 'delete'
+ return 'create'
+
+ def compare_and_update_values(self, current, desired, keys_to_compare):
+ updated_values = {}
+ is_changed = False
+ for key in keys_to_compare:
+ if key in current:
+ if key in desired and desired[key] is not None:
+ if current[key] != desired[key]:
+ updated_values[key] = desired[key]
+ is_changed = True
+ else:
+ updated_values[key] = current[key]
+ else:
+ updated_values[key] = current[key]
+
+ return updated_values, is_changed
+
+ def get_working_environments_info(self, rest_api, headers):
+ '''
+ Get all working environments info
+ '''
+ api = "/occm/api/working-environments"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return response, error
+ else:
+ return response, None
+
+ def look_up_working_environment_by_name_in_list(self, we_list, name):
+ '''
+ Look up working environment by the name in working environment list
+ '''
+ for we in we_list:
+ if we['name'] == name:
+ return we, None
+ return None, "look_up_working_environment_by_name_in_list: Working environment not found"
+
+ def get_working_environment_details_by_name(self, rest_api, headers, name, provider=None):
+ '''
+ Use working environment name to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ cloudProviderName,
+ isHA,
+ svmName
+ '''
+ # check the working environment exist or not
+ api = "/occm/api/working-environments/exists/" + name
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+
+ # get working environment lists
+ api = "/occm/api/working-environments"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+ # look up the working environment in the working environment lists
+ if provider is None or provider == 'onPrem':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['onPremWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'gcp':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['gcpVsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'azure':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['azureVsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ if provider is None or provider == 'aws':
+ working_environment_details, error = self.look_up_working_environment_by_name_in_list(response['vsaWorkingEnvironments'], name)
+ if error is None:
+ return working_environment_details, None
+ return None, "get_working_environment_details_by_name: Working environment not found"
+
+ def get_working_environment_details(self, rest_api, headers):
+ '''
+ Use working environment id to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ cloudProviderName,
+ ontapClusterProperties,
+ isHA,
+ status,
+ userTags,
+ workingEnvironmentType,
+ '''
+ api = "/occm/api/working-environments/"
+ api += self.parameters['working_environment_id']
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error:
+ return None, "Error: get_working_environment_details %s" % error
+ return response, None
+
+ def get_aws_fsx_details(self, rest_api, header=None, name=None):
+ '''
+ Use working environment id and tenantID to get working environment details including:
+ name: working environment name,
+ publicID: working environment ID
+ '''
+ api = "/fsx-ontap/working-environments/"
+ api += self.parameters['tenant_id']
+ count = 0
+ fsx_details = None
+ if name is None:
+ name = self.parameters['name']
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details %s" % error
+ for each in response:
+ if each['name'] == name:
+ count += 1
+ fsx_details = each
+ if self.parameters.get('working_environment_id'):
+ if each['id'] == self.parameters['working_environment_id']:
+ return each, None
+ if count == 1:
+ return fsx_details, None
+ elif count > 1:
+ return response, "More than one AWS FSx found for %s, use working_environment_id for delete" \
+ "or use different name for create" % name
+ return None, None
+
+ def get_aws_fsx_details_by_id(self, rest_api, header=None):
+ '''
+ Use working environment id and tenantID to get working environment details including:
+ publicID: working environment ID
+ '''
+ api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id']
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details %s" % error
+ for each in response:
+ if self.parameters.get('destination_working_environment_id') and each['id'] == self.parameters['destination_working_environment_id']:
+ return each, None
+ return None, None
+
+ def get_aws_fsx_details_by_name(self, rest_api, header=None):
+ '''
+ Use working environment name and tenantID to get working environment details including:
+ name: working environment name,
+ '''
+ api = "/fsx-ontap/working-environments/%s" % self.parameters['tenant_id']
+ count = 0
+ fsx_details = None
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_details_by_name %s" % error
+ for each in response:
+ if each['name'] == self.parameters['destination_working_environment_name']:
+ count += 1
+ fsx_details = each
+ if count == 1:
+ return fsx_details['id'], None
+ if count > 1:
+ return response, "More than one AWS FSx found for %s" % self.parameters['name']
+ return None, None
+
+ def get_aws_fsx_svm(self, rest_api, id, header=None):
+ '''
+ Use working environment id and tenantID to get FSx svm details including:
+ publicID: working environment ID
+ '''
+ api = "/occm/api/fsx/working-environments/%s/svms" % id
+ response, error, dummy = rest_api.get(api, None, header=header)
+ if error:
+ return response, "Error: get_aws_fsx_svm %s" % error
+ if len(response) == 0:
+ return None, "Error: no SVM found for %s" % id
+ return response[0]['name'], None
+
    def get_working_environment_detail_for_snapmirror(self, rest_api, headers):
        """Resolve source and destination working environments for a SnapMirror relationship.

        The source is resolved by id or name from the Cloud Manager working-environment
        list.  The destination may additionally be an AWS FSx environment (id starting
        with 'fs-', or found by name under tenant_id), in which case only publicId and
        svmName are filled in.  Returns (source_detail, dest_detail, error_message);
        error_message is None on success.
        """

        source_working_env_detail, dest_working_env_detail = {}, {}
        if self.parameters.get('source_working_environment_id'):
            api = '/occm/api/working-environments'
            working_env_details, error, dummy = rest_api.get(api, None, header=headers)
            if error:
                return None, None, "Error getting WE info: %s: %s" % (error, working_env_details)
            # response maps provider group -> list of working environments
            for dummy, values in working_env_details.items():
                for each in values:
                    if each['publicId'] == self.parameters['source_working_environment_id']:
                        source_working_env_detail = each
                        break
        elif self.parameters.get('source_working_environment_name'):
            source_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers,
                                                                                            self.parameters['source_working_environment_name'])
            if error:
                return None, None, error
        else:
            return None, None, "Cannot find working environment by source_working_environment_id or source_working_environment_name"

        if self.parameters.get('destination_working_environment_id'):
            # FSx working environment ids start with 'fs-'; they live under a tenant
            if self.parameters['destination_working_environment_id'].startswith('fs-'):
                if self.parameters.get('tenant_id'):
                    working_env_details, error = self.get_aws_fsx_details_by_id(rest_api, header=headers)
                    if error:
                        return None, None, "Error getting WE info for FSx: %s: %s" % (error, working_env_details)
                    dest_working_env_detail['publicId'] = self.parameters['destination_working_environment_id']
                    svm_name, error = self.get_aws_fsx_svm(rest_api, self.parameters['destination_working_environment_id'], header=headers)
                    if error:
                        return None, None, "Error getting svm name for FSx: %s" % error
                    dest_working_env_detail['svmName'] = svm_name
                else:
                    return None, None, "Cannot find FSx WE by destination WE %s, missing tenant_id" % self.parameters['destination_working_environment_id']
            else:
                api = '/occm/api/working-environments'
                working_env_details, error, dummy = rest_api.get(api, None, header=headers)
                if error:
                    return None, None, "Error getting WE info: %s: %s" % (error, working_env_details)
                for dummy, values in working_env_details.items():
                    for each in values:
                        if each['publicId'] == self.parameters['destination_working_environment_id']:
                            dest_working_env_detail = each
                            break
        elif self.parameters.get('destination_working_environment_name'):
            # with tenant_id, the destination name is looked up among FSx environments first
            if self.parameters.get('tenant_id'):
                fsx_id, error = self.get_aws_fsx_details_by_name(rest_api, header=headers)
                if error:
                    return None, None, "Error getting WE info for FSx: %s" % error
                dest_working_env_detail['publicId'] = fsx_id
                svm_name, error = self.get_aws_fsx_svm(rest_api, fsx_id, header=headers)
                if error:
                    return None, None, "Error getting svm name for FSx: %s" % error
                dest_working_env_detail['svmName'] = svm_name
            else:
                dest_working_env_detail, error = self.get_working_environment_details_by_name(rest_api, headers,
                                                                                              self.parameters['destination_working_environment_name'])
                if error:
                    return None, None, error
        else:
            return None, None, "Cannot find working environment by destination_working_environment_id or destination_working_environment_name"

        return source_working_env_detail, dest_working_env_detail, None
+
+ def create_account(self, rest_api):
+ """
+ Create Account
+ :return: Account ID
+ """
+ # TODO? do we need to create an account? And the code below is broken
+ return None, 'Error: creating an account is not supported.'
+ # headers = {
+ # "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ # }
+
+ # api = '/tenancy/account/MyAccount'
+ # account_res, error, dummy = rest_api.post(api, header=headers)
+ # account_id = None if error is not None else account_res['accountPublicId']
+ # return account_id, error
+
+ def get_or_create_account(self, rest_api):
+ """
+ Get Account
+ :return: Account ID
+ """
+ accounts, error = self.get_account_info(rest_api)
+ if error is not None:
+ return None, error
+ if len(accounts) == 0:
+ return None, 'Error: account cannot be located - check credentials or provide account_id.'
+ # TODO? creating an account is not supported
+ # return self.create_account(rest_api)
+
+ return accounts[0]['accountPublicId'], None
+
+ def get_account_info(self, rest_api, headers=None):
+ """
+ Get Account
+ :return: Account ID
+ """
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+
+ api = '/tenancy/account'
+ account_res, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, error
+ return account_res, None
+
+ def get_account_id(self, rest_api):
+ accounts, error = self.get_account_info(rest_api)
+ if error:
+ return None, error
+ if not accounts:
+ return None, 'Error: no account found - check credentials or provide account_id.'
+ return accounts[0]['accountPublicId'], None
+
+ def get_accounts_info(self, rest_api, headers):
+ '''
+ Get all accounts info
+ '''
+ api = "/occm/api/accounts"
+ response, error, dummy = rest_api.get(api, None, header=headers)
+ if error is not None:
+ return None, error
+ else:
+ return response, None
+
+ def set_api_root_path(self, working_environment_details, rest_api):
+ '''
+ set API url root path based on the working environment provider
+ '''
+ provider = working_environment_details['cloudProviderName'] if working_environment_details.get('cloudProviderName') else None
+ api_root_path = None
+ if self.parameters['working_environment_id'].startswith('fs-'):
+ api_root_path = "/occm/api/fsx"
+ elif provider == "Amazon":
+ api_root_path = "/occm/api/aws/ha" if working_environment_details['isHA'] else "/occm/api/vsa"
+ elif working_environment_details['isHA']:
+ api_root_path = "/occm/api/" + provider.lower() + "/ha"
+ else:
+ api_root_path = "/occm/api/" + provider.lower() + "/vsa"
+ rest_api.api_root_path = api_root_path
+
+ def have_required_parameters(self, action):
+ '''
+ Check if all the required parameters in self.params are available or not besides the mandatory parameters
+ '''
+ actions = {'create_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'],
+ 'update_aggregate': ['number_of_disks', 'disk_size_size', 'disk_size_unit', 'working_environment_id'],
+ 'delete_aggregate': ['working_environment_id'],
+ }
+ missed_params = [
+ parameter
+ for parameter in actions[action]
+ if parameter not in self.parameters
+ ]
+
+ if not missed_params:
+ return True, None
+ else:
+ return False, missed_params
+
    def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and return a dict of attributes that are
        not in the current state
        It is expected that all attributes of interest are listed in current and
        desired.
        :param: current: current attributes in ONTAP
        :param: desired: attributes from playbook
        :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
        :return: dict of attributes to be modified
        :rtype: dict

        NOTE: depending on the attribute, the caller may need to do a modify or a
        different operation (eg move volume if the modified attribute is an
        aggregate name)
        '''
        # if the object does not exist, we can't modify it
        modified = {}
        if current is None:
            return modified

        # error out if keys do not match
        # self.check_keys(current, desired)

        # collect changed attributes
        for key, value in current.items():
            # only keys present in desired with a non-None value are compared
            if key in desired and desired[key] is not None:
                if isinstance(value, list):
                    modified_list = self.compare_lists(value, desired[key], get_list_diff)  # get modified list from current and desired
                    if modified_list is not None:
                        modified[key] = modified_list
                elif isinstance(value, dict):
                    # recurse into nested dicts; record only non-empty diffs
                    modified_dict = self.get_modified_attributes(value, desired[key])
                    if modified_dict:
                        modified[key] = modified_dict
                else:
                    try:
                        # NOTE: cmp is a py2-style helper defined elsewhere in this file
                        result = cmp(value, desired[key])
                    except TypeError as exc:
                        # re-raise with context so mismatched types are easy to locate
                        raise TypeError("%s, key: %s, value: %s, desired: %s" % (repr(exc), key, repr(value), repr(desired[key])))
                    else:
                        if result != 0:
                            modified[key] = desired[key]
        if modified:
            # flag module-level changed state for the caller
            self.changed = True
        return modified
+
+ @staticmethod
+ def compare_lists(current, desired, get_list_diff):
+ ''' compares two lists and return a list of elements that are either the desired elements or elements that are
+ modified from the current state depending on the get_list_diff flag
+ :param: current: current item attribute in ONTAP
+ :param: desired: attributes from playbook
+ :param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
+ :return: list of attributes to be modified
+ :rtype: list
+ '''
+ current_copy = deepcopy(current)
+ desired_copy = deepcopy(desired)
+
+ # get what in desired and not in current
+ desired_diff_list = list()
+ for item in desired:
+ if item in current_copy:
+ current_copy.remove(item)
+ else:
+ desired_diff_list.append(item)
+
+ # get what in current but not in desired
+ current_diff_list = []
+ for item in current:
+ if item in desired_copy:
+ desired_copy.remove(item)
+ else:
+ current_diff_list.append(item)
+
+ if desired_diff_list or current_diff_list:
+ # there are changes
+ if get_list_diff:
+ return desired_diff_list
+ else:
+ return desired
+ else:
+ return None
+
+ @staticmethod
+ def convert_module_args_to_api(parameters, exclusion=None):
+ '''
+ Convert a list of string module args to API option format.
+ For example, convert test_option to testOption.
+ :param parameters: dict of parameters to be converted.
+ :param exclusion: list of parameters to be ignored.
+ :return: dict of key value pairs.
+ '''
+ exclude_list = ['api_url', 'token_type', 'refresh_token', 'sa_secret_key', 'sa_client_id']
+ if exclusion is not None:
+ exclude_list += exclusion
+ api_keys = {}
+ for k, v in parameters.items():
+ if k not in exclude_list:
+ words = k.split("_")
+ api_key = ""
+ for word in words:
+ if len(api_key) > 0:
+ word = word.title()
+ api_key += word
+ api_keys[api_key] = v
+ return api_keys
+
+ @staticmethod
+ def convert_data_to_tabbed_jsonstring(data):
+ '''
+ Convert a dictionary data to json format string
+ '''
+ dump = json.dumps(data, indent=2, separators=(',', ': '))
+ return re.sub(
+ '\n +',
+ lambda match: '\n' + '\t' * int(len(match.group().strip('\n')) / 2),
+ dump,
+ )
+
+ @staticmethod
+ def encode_certificates(certificate_file):
+ '''
+ Read certificate file and encode it
+ '''
+ try:
+ with open(certificate_file, mode='rb') as fh:
+ cert = fh.read()
+ except (OSError, IOError) as exc:
+ return None, str(exc)
+ if not cert:
+ return None, "Error: file is empty"
+ return base64.b64encode(cert).decode('utf-8'), None
+
+ @staticmethod
+ def get_occm_agents_by_account(rest_api, account_id):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ params = {'account_id': account_id}
+ api = "/agents-mgmt/agent"
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ agents, error, dummy = rest_api.get(api, header=headers, params=params)
+ return agents, error
+
+ def get_occm_agents_by_name(self, rest_api, account_id, name, provider):
+ """
+ Collect a list of agents matching account_id, name, and provider.
+ :return: list of agents, error
+ """
+ # I tried to query by name and provider in addition to account_id, but it returned everything
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ if isinstance(agents, dict) and 'agents' in agents:
+ agents = [agent for agent in agents['agents'] if agent['name'] == name and agent['provider'] == provider]
+ return agents, error
+
+ def get_agents_info(self, rest_api, headers):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ account_id, error = self.get_account_id(rest_api)
+ if error:
+ return None, error
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ return agents, error
+
+ def get_active_agents_info(self, rest_api, headers):
+ """
+ Collect a list of agents matching account_id.
+ :return: list of agents, error
+ """
+ clients = []
+ account_id, error = self.get_account_id(rest_api)
+ if error:
+ return None, error
+ agents, error = self.get_occm_agents_by_account(rest_api, account_id)
+ if isinstance(agents, dict) and 'agents' in agents:
+ agents = [agent for agent in agents['agents'] if agent['status'] == 'active']
+ clients = [{'name': agent['name'], 'client_id': agent['agentId'], 'provider': agent['provider']} for agent in agents]
+ return clients, error
+
+ @staticmethod
+ def get_occm_agent_by_id(rest_api, client_id):
+ """
+ Fetch OCCM agent given its client id
+ :return: agent details, error
+ """
+ api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id)
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ response, error, dummy = rest_api.get(api, header=headers)
+ if isinstance(response, dict) and 'agent' in response:
+ agent = response['agent']
+ return agent, error
+ return response, error
+
    @staticmethod
    def check_occm_status(rest_api, client_id):
        """
        Check OCCM status
        :return: (status, error)
        DEPRECATED - use get_occm_agent_by_id but the return value format is different!
        (this variant returns the raw response, without unwrapping the 'agent' envelope)
        """

        api = "/agents-mgmt/agent/" + rest_api.format_client_id(client_id)
        headers = {
            "X-User-Token": rest_api.token_type + " " + rest_api.token,
        }
        occm_status, error, dummy = rest_api.get(api, header=headers)
        return occm_status, error
+
+ def register_agent_to_service(self, rest_api, provider, vpc):
+ '''
+ register agent to service
+ '''
+ api = '/agents-mgmt/connector-setup'
+
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ }
+ body = {
+ "accountId": self.parameters['account_id'],
+ "name": self.parameters['name'],
+ "company": self.parameters['company'],
+ "placement": {
+ "provider": provider,
+ "region": self.parameters['region'],
+ "network": vpc,
+ "subnet": self.parameters['subnet_id'],
+ },
+ "extra": {
+ "proxy": {
+ "proxyUrl": self.parameters.get('proxy_url'),
+ "proxyUserName": self.parameters.get('proxy_user_name'),
+ "proxyPassword": self.parameters.get('proxy_password'),
+ }
+ }
+ }
+
+ if provider == "AWS":
+ body['placement']['network'] = vpc
+
+ response, error, dummy = rest_api.post(api, body, header=headers)
+ return response, error
+
+ def delete_occm(self, rest_api, client_id):
+ '''
+ delete occm
+ '''
+ api = '/agents-mgmt/agent/' + rest_api.format_client_id(client_id)
+ headers = {
+ "X-User-Token": rest_api.token_type + " " + rest_api.token,
+ "X-Tenancy-Account-Id": self.parameters['account_id'],
+ }
+
+ occm_status, error, dummy = rest_api.delete(api, None, header=headers)
+ return occm_status, error
+
+ def delete_occm_agents(self, rest_api, agents):
+ '''
+ delete a list of occm
+ '''
+ results = []
+ for agent in agents:
+ if 'agentId' in agent:
+ occm_status, error = self.delete_occm(rest_api, agent['agentId'])
+ else:
+ occm_status, error = None, 'unexpected agent contents: %s' % repr(agent)
+ if error:
+ results.append((occm_status, error))
+ return results
+
    @staticmethod
    def call_parameters():
        """Return the ARM deployment parameters skeleton (JSON text) for the Azure connector VM template."""
        return """
    {
        "location": {
            "value": "string"
        },
        "virtualMachineName": {
            "value": "string"
        },
        "virtualMachineSize": {
            "value": "string"
        },
        "networkSecurityGroupName": {
            "value": "string"
        },
        "adminUsername": {
            "value": "string"
        },
        "virtualNetworkId": {
            "value": "string"
        },
        "adminPassword": {
            "value": "string"
        },
        "subnetId": {
            "value": "string"
        },
        "customData": {
            "value": "string"
        },
        "environment": {
            "value": "prod"
        },
        "storageAccount": {
            "value": "string"
        }
    }
    """
+
    @staticmethod
    def call_template():
        """Return the ARM template (JSON text) used to deploy the Azure connector VM and its network resources."""
        return """
    {
      "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
      "contentVersion": "1.0.0.0",
      "parameters": {
        "location": {
          "type": "string",
          "defaultValue": "eastus"
        },
        "virtualMachineName": {
          "type": "string"
        },
        "virtualMachineSize":{
          "type": "string"
        },
        "adminUsername": {
          "type": "string"
        },
        "virtualNetworkId": {
          "type": "string"
        },
        "networkSecurityGroupName": {
          "type": "string"
        },
        "adminPassword": {
          "type": "securestring"
        },
        "subnetId": {
          "type": "string"
        },
        "customData": {
          "type": "string"
        },
        "environment": {
          "type": "string",
          "defaultValue": "prod"
        },
        "storageAccount": {
          "type": "string"
        }
      },
      "variables": {
        "vnetId": "[parameters('virtualNetworkId')]",
        "subnetRef": "[parameters('subnetId')]",
        "networkInterfaceName": "[concat(parameters('virtualMachineName'),'-nic')]",
        "diagnosticsStorageAccountName": "[parameters('storageAccount')]",
        "diagnosticsStorageAccountId": "[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]",
        "diagnosticsStorageAccountType": "Standard_LRS",
        "publicIpAddressName": "[concat(parameters('virtualMachineName'),'-ip')]",
        "publicIpAddressType": "Dynamic",
        "publicIpAddressSku": "Basic",
        "msiExtensionName": "ManagedIdentityExtensionForLinux",
        "occmOffer": "[if(equals(parameters('environment'), 'stage'), 'netapp-oncommand-cloud-manager-staging-preview', 'netapp-oncommand-cloud-manager')]"
      },
      "resources": [
        {
          "name": "[parameters('virtualMachineName')]",
          "type": "Microsoft.Compute/virtualMachines",
          "apiVersion": "2018-04-01",
          "location": "[parameters('location')]",
          "dependsOn": [
            "[concat('Microsoft.Network/networkInterfaces/', variables('networkInterfaceName'))]",
            "[concat('Microsoft.Storage/storageAccounts/', variables('diagnosticsStorageAccountName'))]"
          ],
          "properties": {
            "osProfile": {
              "computerName": "[parameters('virtualMachineName')]",
              "adminUsername": "[parameters('adminUsername')]",
              "adminPassword": "[parameters('adminPassword')]",
              "customData": "[base64(parameters('customData'))]"
            },
            "hardwareProfile": {
              "vmSize": "[parameters('virtualMachineSize')]"
            },
            "storageProfile": {
              "imageReference": {
                "publisher": "netapp",
                "offer": "[variables('occmOffer')]",
                "sku": "occm-byol",
                "version": "latest"
              },
              "osDisk": {
                "createOption": "fromImage",
                "managedDisk": {
                  "storageAccountType": "Premium_LRS"
                }
              },
              "dataDisks": []
            },
            "networkProfile": {
              "networkInterfaces": [
                {
                  "id": "[resourceId('Microsoft.Network/networkInterfaces', variables('networkInterfaceName'))]"
                }
              ]
            },
            "diagnosticsProfile": {
              "bootDiagnostics": {
                "enabled": true,
                "storageUri":
                  "[concat('https://', variables('diagnosticsStorageAccountName'), '.blob.core.windows.net/')]"
              }
            }
          },
          "plan": {
            "name": "occm-byol",
            "publisher": "netapp",
            "product": "[variables('occmOffer')]"
          },
          "identity": {
            "type": "systemAssigned"
          }
        },
        {
          "apiVersion": "2017-12-01",
          "type": "Microsoft.Compute/virtualMachines/extensions",
          "name": "[concat(parameters('virtualMachineName'),'/', variables('msiExtensionName'))]",
          "location": "[parameters('location')]",
          "dependsOn": [
            "[concat('Microsoft.Compute/virtualMachines/', parameters('virtualMachineName'))]"
          ],
          "properties": {
            "publisher": "Microsoft.ManagedIdentity",
            "type": "[variables('msiExtensionName')]",
            "typeHandlerVersion": "1.0",
            "autoUpgradeMinorVersion": true,
            "settings": {
              "port": 50342
            }
          }
        },
        {
          "name": "[variables('diagnosticsStorageAccountName')]",
          "type": "Microsoft.Storage/storageAccounts",
          "apiVersion": "2015-06-15",
          "location": "[parameters('location')]",
          "properties": {
            "accountType": "[variables('diagnosticsStorageAccountType')]"
          }
        },
        {
          "name": "[variables('networkInterfaceName')]",
          "type": "Microsoft.Network/networkInterfaces",
          "apiVersion": "2018-04-01",
          "location": "[parameters('location')]",
          "dependsOn": [
            "[concat('Microsoft.Network/publicIpAddresses/', variables('publicIpAddressName'))]"
          ],
          "properties": {
            "ipConfigurations": [
              {
                "name": "ipconfig1",
                "properties": {
                  "subnet": {
                    "id": "[variables('subnetRef')]"
                  },
                  "privateIPAllocationMethod": "Dynamic",
                  "publicIpAddress": {
                    "id": "[resourceId(resourceGroup().name,'Microsoft.Network/publicIpAddresses', variables('publicIpAddressName'))]"
                  }
                }
              }
            ],
            "networkSecurityGroup": {
              "id": "[parameters('networkSecurityGroupName')]"
            }
          }
        },
        {
          "name": "[variables('publicIpAddressName')]",
          "type": "Microsoft.Network/publicIpAddresses",
          "apiVersion": "2017-08-01",
          "location": "[parameters('location')]",
          "properties": {
            "publicIpAllocationMethod": "[variables('publicIpAddressType')]"
          },
          "sku": {
            "name": "[variables('publicIpAddressSku')]"
          }
        }
      ],
      "outputs": {
        "publicIpAddressName": {
          "type": "string",
          "value": "[variables('publicIpAddressName')]"
        }
      }
    }
    """
+
+ def get_tenant(self, rest_api, headers):
+ """
+ Get workspace ID (tenant)
+ """
+ api = '/occm/api/tenants'
+ response, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, 'Error: unexpected response on getting tenant for cvo: %s, %s' % (str(error), str(response))
+
+ return response[0]['publicId'], None
+
+ def get_nss(self, rest_api, headers):
+ """
+ Get nss account
+ """
+ api = '/occm/api/accounts'
+ response, error, dummy = rest_api.get(api, header=headers)
+ if error is not None:
+ return None, 'Error: unexpected response on getting nss for cvo: %s, %s' % (str(error), str(response))
+
+ if len(response['nssAccounts']) == 0:
+ return None, "Error: could not find any NSS account"
+
+ return response['nssAccounts'][0]['publicId'], None
+
+ def get_working_environment_property(self, rest_api, headers, fields):
+ # GET /vsa/working-environments/{workingEnvironmentId}?fields=status,awsProperties,ontapClusterProperties
+ api = '%s/working-environments/%s' % (rest_api.api_root_path, self.parameters['working_environment_id'])
+ params = {'fields': ','.join(fields)}
+ response, error, dummy = rest_api.get(api, params=params, header=headers)
+ if error:
+ return None, "Error: get_working_environment_property %s" % error
+ return response, None
+
+ def user_tag_key_unique(self, tag_list, key_name):
+ checked_keys = []
+ for t in tag_list:
+ if t[key_name] in checked_keys:
+ return False, 'Error: %s %s must be unique' % (key_name, t[key_name])
+ else:
+ checked_keys.append(t[key_name])
+ return True, None
+
+ def current_label_exist(self, current, desired, is_ha=False):
+ current_key_set = set(current.keys())
+ # Ignore auto generated gcp label in CVO GCP HA
+ current_key_set.discard('gcp_resource_id')
+ current_key_set.discard('count-down')
+ if is_ha:
+ current_key_set.discard('partner-platform-serial-number')
+ # python 2.6 doe snot support set comprehension
+ desired_keys = set([a_dict['label_key'] for a_dict in desired])
+ if current_key_set.issubset(desired_keys):
+ return True, None
+ else:
+ return False, 'Error: label_key %s in gcp_label cannot be removed' % str(current_key_set)
+
+ def is_label_value_changed(self, current_tags, desired_tags):
+ tag_keys = list(current_tags.keys())
+ user_tag_keys = [key for key in tag_keys if
+ key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')]
+ desired_keys = [a_dict['label_key'] for a_dict in desired_tags]
+ if user_tag_keys == desired_keys:
+ for tag in desired_tags:
+ if current_tags[tag['label_key']] != tag['label_value']:
+ return True
+ return False
+ else:
+ return True
+
+ def compare_gcp_labels(self, current_tags, user_tags, is_ha):
+ '''
+ Update user-tag API behaves differently in GCP CVO.
+ It only supports adding gcp_labels and modifying the values of gcp_labels. Removing gcp_label is not allowed.
+ '''
+ # check if any current gcp_labels are going to be removed or not
+ # gcp HA has one extra gcp_label created automatically
+ resp, error = self.user_tag_key_unique(user_tags, 'label_key')
+ if error is not None:
+ return None, error
+ # check if any current key labels are in the desired key labels
+ resp, error = self.current_label_exist(current_tags, user_tags, is_ha)
+ if error is not None:
+ return None, error
+ if self.is_label_value_changed(current_tags, user_tags):
+ return True, None
+ else:
+ # no change
+ return None, None
+
+ def compare_cvo_tags_labels(self, current_tags, user_tags):
+ '''
+ Compare exiting tags/labels and user input tags/labels to see if there is a change
+ gcp_labels: label_key, label_value
+ aws_tag/azure_tag: tag_key, tag_label
+ '''
+ # azure has one extra azure_tag DeployedByOccm created automatically and it cannot be modified.
+ tag_keys = list(current_tags.keys())
+ user_tag_keys = [key for key in tag_keys if key != 'DeployedByOccm']
+ current_len = len(user_tag_keys)
+ resp, error = self.user_tag_key_unique(user_tags, 'tag_key')
+ if error is not None:
+ return None, error
+ if len(user_tags) != current_len:
+ return True, None
+ # Check if tags/labels of desired configuration in current working environment
+ for item in user_tags:
+ if item['tag_key'] in current_tags and item['tag_value'] != current_tags[item['tag_key']]:
+ return True, None
+ elif item['tag_key'] not in current_tags:
+ return True, None
+ return False, None
+
    def is_cvo_tags_changed(self, rest_api, headers, parameters, tag_name):
        '''
        Since tags/labels are CVO optional parameters, this function needs to cover with/without tags/labels on both lists
        :param parameters: desired module parameters
        :param tag_name: 'aws_tag', 'azure_tag' or 'gcp_labels'
        :return: (change needed flag, error); the flag is True/False, or None on error
        '''
        # get working environment details by working environment ID
        current, error = self.get_working_environment_details(rest_api, headers)
        if error is not None:
            return None, 'Error: Cannot find working environment %s error: %s' % (self.parameters['working_environment_id'], str(error))
        self.set_api_root_path(current, rest_api)
        # compare tags
        # no tags in current cvo
        if 'userTags' not in current or len(current['userTags']) == 0:
            # an update is needed only when the playbook requests tags
            return tag_name in parameters, None

        if tag_name == 'gcp_labels':
            if tag_name in parameters:
                return self.compare_gcp_labels(current['userTags'], parameters[tag_name], current['isHA'])
            # if both are empty, no need to update
            # Ignore auto generated gcp label in CVO GCP
            # 'count-down', 'gcp_resource_id', and 'partner-platform-serial-number'(HA)
            tag_keys = list(current['userTags'].keys())
            user_tag_keys = [key for key in tag_keys if key not in ('count-down', 'gcp_resource_id', 'partner-platform-serial-number')]
            if not user_tag_keys:
                return False, None
            else:
                # the GCP update API cannot remove existing labels
                return None, 'Error: Cannot remove current gcp_labels'
        # no tags in input parameters
        if tag_name not in parameters:
            # existing tags would have to be removed - treat as a change
            return True, None
        else:
            # has tags in input parameters and existing CVO
            return self.compare_cvo_tags_labels(current['userTags'], parameters[tag_name])
+
+ def get_license_type(self, rest_api, headers, provider, region, instance_type, ontap_version, license_name):
+ # Permutation query example:
+ # aws: /metadata/permutations?region=us-east-1&instance_type=m5.xlarge&version=ONTAP-9.10.1.T1
+ # azure: /metadata/permutations?region=westus&instance_type=Standard_E4s_v3&version=ONTAP-9.10.1.T1.azure
+ # gcp: /metadata/permutations?region=us-east1&instance_type=n2-standard-4&version=ONTAP-9.10.1.T1.gcp
+ # The examples of the ontapVersion in ontapClusterProperties response:
+ # AWS for both single and HA: 9.10.1RC1, 9.8
+ # AZURE single: 9.10.1RC1.T1.azure. For HA: 9.10.1RC1.T1.azureha
+ # GCP for both single and HA: 9.10.1RC1.T1, 9.8.T1
+ # To be used in permutation:
+ # AWS ontap_version format: ONTAP-x.x.x.T1 or ONTAP-x.x.x.T1.ha for Ha
+ # AZURE ontap_version format: ONTAP-x.x.x.T1.azure or ONTAP-x.x.x.T1.azureha for HA
+ # GCP ontap_version format: ONTAP-x.x.x.T1.gcp or ONTAP-x.x.x.T1.gcpha for HA
+ version = 'ONTAP-' + ontap_version
+ if provider == 'aws':
+ version += '.T1.ha' if self.parameters['is_ha'] else '.T1'
+ elif provider == 'gcp':
+ version += '.T1' if not ontap_version.endswith('T1') else ''
+ version += '.gcpha' if self.parameters['is_ha'] else '.gcp'
+ api = '%s/metadata/permutations' % rest_api.api_root_path
+ params = {'region': region,
+ 'version': version,
+ 'instance_type': instance_type
+ }
+ response, error, dummy = rest_api.get(api, params=params, header=headers)
+ if error:
+ return None, "Error: get_license_type %s %s" % (response, error)
+ for item in response:
+ if item['license']['name'] == license_name:
+ return item['license']['type'], None
+
+ return None, "Error: get_license_type cannot get license type %s" % response
+
+ def get_modify_cvo_params(self, rest_api, headers, desired, provider):
+ modified = []
+ if desired['update_svm_password']:
+ modified = ['svm_password']
+ # Get current working environment property
+ properties = ['status', 'ontapClusterProperties.fields(upgradeVersions)']
+ # instanceType in aws case is stored in awsProperties['instances'][0]['instanceType']
+ if provider == 'aws':
+ properties.append('awsProperties')
+ else:
+ properties.append('providerProperties')
+
+ we, err = self.get_working_environment_property(rest_api, headers, properties)
+
+ if err is not None:
+ return None, err
+
+ if we['status'] is None or we['status']['status'] != 'ON':
+ return None, "Error: get_modify_cvo_params working environment %s status is not ON. Operation cannot be performed." % we['publicId']
+
+ tier_level = None
+ if we['ontapClusterProperties']['capacityTierInfo'] is not None:
+ tier_level = we['ontapClusterProperties']['capacityTierInfo']['tierLevel']
+
+ # collect changed attributes
+ if tier_level is not None and tier_level != desired['tier_level']:
+ if provider == 'azure':
+ if desired['capacity_tier'] == 'Blob':
+ modified.append('tier_level')
+ elif provider == 'aws':
+ if desired['capacity_tier'] == 'S3':
+ modified.append('tier_level')
+ elif provider == 'gcp':
+ if desired['capacity_tier'] == 'cloudStorage':
+ modified.append('tier_level')
+
+ if 'svm_name' in desired and we['svmName'] != desired['svm_name']:
+ modified.append('svm_name')
+
+ if 'writing_speed_state' in desired:
+ if we['ontapClusterProperties']['writingSpeedState'] != desired['writing_speed_state'].upper():
+ modified.append('writing_speed_state')
+
+ if provider == 'aws':
+ current_instance_type = we['awsProperties']['instances'][0]['instanceType']
+ region = we['awsProperties']['regionName']
+ else:
+ current_instance_type = we['providerProperties']['instanceType']
+ region = we['providerProperties']['regionName']
+
+ if current_instance_type != desired['instance_type']:
+ modified.append('instance_type')
+
+ # check if license type is changed
+ current_license_type, error = self.get_license_type(rest_api, headers, provider, region, current_instance_type,
+ we['ontapClusterProperties']['ontapVersion'],
+ we['ontapClusterProperties']['licenseType']['name'])
+ if err is not None:
+ return None, error
+ if current_license_type != desired['license_type']:
+ modified.append('license_type')
+
+ if desired['upgrade_ontap_version'] is True:
+ if desired['use_latest_version'] or desired['ontap_version'] == 'latest':
+ return None, "Error: To upgrade ONTAP image, the ontap_version must be a specific version"
+ current_version = 'ONTAP-' + we['ontapClusterProperties']['ontapVersion']
+ if not desired['ontap_version'].startswith(current_version):
+ if we['ontapClusterProperties']['upgradeVersions'] is not None:
+ available_versions = []
+ for image_info in we['ontapClusterProperties']['upgradeVersions']:
+ available_versions.append(image_info['imageVersion'])
+ # AWS ontap_version format: ONTAP-x.x.x.Tx or ONTAP-x.x.x.Tx.ha for Ha
+ # AZURE ontap_version format: ONTAP-x.x.x.Tx.azure or .azureha for HA
+ # GCP ontap_version format: ONTAP-x.x.x.Tx.gcp or .gcpha for HA
+ # Tx is not relevant for ONTAP version. But it is needed for the CVO creation
+ # upgradeVersion imageVersion format: ONTAP-x.x.x
+ if desired['ontap_version'].startswith(image_info['imageVersion']):
+ modified.append('ontap_version')
+ break
+ else:
+ return None, "Error: No ONTAP image available for version %s. Available versions: %s" % (desired['ontap_version'], available_versions)
+
+ tag_name = {
+ 'aws': 'aws_tag',
+ 'azure': 'azure_tag',
+ 'gcp': 'gcp_labels'
+ }
+
+ need_change, error = self.is_cvo_tags_changed(rest_api, headers, desired, tag_name[provider])
+ if error is not None:
+ return None, error
+ if need_change:
+ modified.append(tag_name[provider])
+
+ # The updates of followings are not supported. Will response failure.
+ for key, value in desired.items():
+ if key == 'project_id' and we['providerProperties']['projectName'] != value:
+ modified.append('project_id')
+ if key == 'zone' and we['providerProperties']['zoneName'][0] != value:
+ modified.append('zone')
+ if key == 'cidr' and we['providerProperties']['vnetCidr'] != value:
+ modified.append('cidr')
+ if key == 'location' and we['providerProperties']['regionName'] != value:
+ modified.append('location')
+ if key == 'availability_zone' and we['providerProperties']['availabilityZone'] != value:
+ modified.append('availability_zone')
+
+ if modified:
+ self.changed = True
+ return modified, None
+
+ def is_cvo_update_needed(self, rest_api, headers, parameters, changeable_params, provider):
+ modify, error = self.get_modify_cvo_params(rest_api, headers, parameters, provider)
+ if error is not None:
+ return None, error
+ unmodifiable = [attr for attr in modify if attr not in changeable_params]
+ if unmodifiable:
+ return None, "%s cannot be modified." % str(unmodifiable)
+
+ return modify, None
+
+ def wait_cvo_update_complete(self, rest_api, headers):
+ retry_count = 65
+ if self.parameters['is_ha'] is True:
+ retry_count *= 2
+ for count in range(retry_count):
+ # get CVO status
+ we, err = self.get_working_environment_property(rest_api, headers, ['status'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ if we['status']['status'] != "UPDATING":
+ return True, None
+ time.sleep(60)
+
+ return False, 'Error: Taking too long for CVO to be active after update or not properly setup'
+
+ def update_cvo_tags(self, api_root, rest_api, headers, tag_name, tag_list):
+ body = {}
+ tags = []
+ if tag_list is not None:
+ for tag in tag_list:
+ atag = {
+ 'tagKey': tag['label_key'] if tag_name == "gcp_labels" else tag['tag_key'],
+ 'tagValue': tag['label_value'] if tag_name == "gcp_labels" else tag['tag_value']
+ }
+ tags.append(atag)
+ body['tags'] = tags
+
+ response, err, dummy = rest_api.put(api_root + "user-tags", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modifying tags: %s, %s' % (str(err), str(response))
+
+ return True, None
+
+ def update_svm_password(self, api_root, rest_api, headers, svm_password):
+ body = {'password': svm_password}
+ response, err, dummy = rest_api.put(api_root + "set-password", body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on modifying svm_password: %s, %s' % (str(err), str(response))
+
+ return True, None
+
+ def update_svm_name(self, api_root, rest_api, headers, svm_name):
+ # get current svmName
+ we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ body = {'svmNewName': svm_name,
+ 'svmName': we['svmName']}
+ response, err, dummy = rest_api.put(api_root + "svm", body, header=headers)
+ if err is not None:
+ return False, "update svm_name error"
+ return True, None
+
    def update_tier_level(self, api_root, rest_api, headers, tier_level):
        """Change the capacity-tiering level of the working environment.

        Returns (True, None) on success or (False, error_message).
        """
        body = {'level': tier_level}
        response, err, dummy = rest_api.post(api_root + "change-tier-level", body, header=headers)
        if err is not None:
            return False, 'Error: unexpected response on modify tier_level: %s, %s' % (str(err), str(response))

        return True, None
+
    def update_writing_speed_state(self, api_root, rest_api, headers, writing_speed_state):
        """Change the writing speed state (e.g. NORMAL/HIGH) and wait for the CVO to settle.

        The value is upper-cased before being sent, as the API expects.
        Returns (True, None) on success or (False, error_message).
        """
        body = {'writingSpeedState': writing_speed_state.upper()}
        response, err, dummy = rest_api.put(api_root + "writing-speed", body, header=headers)
        if err is not None:
            return False, 'Error: unexpected response on modify writing_speed_state: %s, %s' % (str(err), str(response))
        # check upgrade status
        dummy, err = self.wait_cvo_update_complete(rest_api, headers)
        return err is None, err
+
    def update_instance_license_type(self, api_root, rest_api, headers, instance_type, license_type):
        """Change the instance type and/or license type, then wait for the CVO to settle.

        Both values are always sent together, as the API endpoint expects.
        Returns (True, None) on success or (False, error_message).
        """
        body = {'instanceType': instance_type,
                'licenseType': license_type}
        response, err, dummy = rest_api.put(api_root + "license-instance-type", body, header=headers)
        if err is not None:
            return False, 'Error: unexpected response on modify instance_type and license_type: %s, %s' % (str(err), str(response))
        # check upgrade status
        dummy, err = self.wait_cvo_update_complete(rest_api, headers)
        return err is None, err
+
+ def set_config_flag(self, rest_api, headers):
+ body = {'value': True, 'valueType': 'BOOLEAN'}
+ base_url = '/occm/api/occm/config/skip-eligibility-paygo-upgrade'
+ response, err, dummy = rest_api.put(base_url, body, header=headers)
+ if err is not None:
+ return False, "set_config_flag error"
+
+ return True, None
+
+ def do_ontap_image_upgrade(self, rest_api, headers, desired):
+ # get ONTAP image version
+ we, err = self.get_working_environment_property(rest_api, headers, ['ontapClusterProperties.fields(upgradeVersions)'])
+ if err is not None:
+ return False, 'Error: get_working_environment_property failed: %s' % (str(err))
+ body = {'updateType': "OCCM_PROVIDED"}
+ for image_info in we['ontapClusterProperties']['upgradeVersions']:
+ if image_info['imageVersion'] in desired:
+ body['updateParameter'] = image_info['imageVersion']
+ break
+ # upgrade
+ base_url = "%s/working-environments/%s/update-image" % (rest_api.api_root_path, self.parameters['working_environment_id'])
+ response, err, dummy = rest_api.post(base_url, body, header=headers)
+ if err is not None:
+ return False, 'Error: unexpected response on do_ontap_image_upgrade: %s, %s' % (str(err), str(response))
+ else:
+ return True, None
+
    def wait_ontap_image_upgrade_complete(self, rest_api, headers, desired):
        """Poll until the ONTAP image upgrade reaches the desired version.

        Polls once per minute, up to 65 times — doubled for HA pairs.
        Success requires the environment to have left UPDATING *and* to
        report an ontapVersion contained in `desired`.
        Returns (True, None) on success, or (False, error_message) on an
        API error or timeout.
        """
        retry_count = 65
        if self.parameters['is_ha'] is True:
            retry_count *= 2
        for count in range(retry_count):
            # get CVO status
            we, err = self.get_working_environment_property(rest_api, headers, ['status', 'ontapClusterProperties'])
            if err is not None:
                return False, 'Error: get_working_environment_property failed: %s' % (str(err))
            if we['status']['status'] != "UPDATING" and we['ontapClusterProperties']['ontapVersion'] != "":
                if we['ontapClusterProperties']['ontapVersion'] in desired:
                    return True, None
            time.sleep(60)

        return False, 'Error: Taking too long for CVO to be active or not properly setup'
+
    def upgrade_ontap_image(self, rest_api, headers, desired):
        """Run the full ONTAP image upgrade sequence.

        Steps: set the skip-eligibility config flag, trigger the upgrade,
        then wait for the upgrade to complete.  Stops at the first failing
        step.  Returns (True, None) on success or (False, error_message).
        """
        # set flag
        dummy, err = self.set_config_flag(rest_api, headers)
        if err is not None:
            return False, err
        # upgrade
        dummy, err = self.do_ontap_image_upgrade(rest_api, headers, desired)
        if err is not None:
            return False, err
        # check upgrade status
        dummy, err = self.wait_ontap_image_upgrade_complete(rest_api, headers, desired)
        return err is None, err
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py
new file mode 100644
index 000000000..9533d5f91
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aggregate.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_aggregate
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_aggregate
+short_description: NetApp Cloud Manager Aggregate
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, Modify or Delete Aggregate on Cloud Manager.
+
+options:
+ state:
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+ required: true
+ type: str
+
+ name:
+ description:
+ - The name of the new aggregate.
+ required: true
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the aggregate will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the aggregate will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ number_of_disks:
+ description:
+ - The required number of disks in the new aggregate.
+ type: int
+
+ disk_size_size:
+ description:
+ - The required size of the disks.
+ type: int
+
+ disk_size_unit:
+ description:
+ - The disk size unit ['GB' or 'TB']. The default is 'TB'.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ home_node:
+ description:
+ - The home node that the new aggregate should belong to.
+ type: str
+
+ provider_volume_type:
+ description:
+ - The cloud provider volume type.
+ type: str
+
+ capacity_tier:
+ description:
+ - The aggregate's capacity tier for tiering cold data to object storage.
+ - If the value is NONE, the capacity_tier will not be set on aggregate creation.
+ choices: [ 'NONE', 'S3', 'Blob', 'cloudStorage']
+ type: str
+
+ iops:
+ description:
+    - Provisioned IOPS. Needed only when provider_volume_type is "io1".
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create Aggregate
+ netapp.cloudmanager.na_cloudmanager_aggregate:
+ state: present
+ name: AnsibleAggregate
+ working_environment_name: testAWS
+ client_id: "{{ client_id }}"
+ number_of_disks: 2
+ refresh_token: xxx
+
+- name: Delete Aggregate
+ netapp.cloudmanager.na_cloudmanager_aggregate:
+ state: absent
+ name: AnsibleAggregate
+ working_environment_name: testAWS
+ client_id: "{{ client_id }}"
+ refresh_token: xxx
+'''
+
+RETURN = '''
+msg:
+ description: Success message.
+ returned: success
+ type: str
+ sample: "Aggregate Created"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudmanagerAggregate(object):
    '''
    Contains methods to parse arguments,
    derive details of CloudmanagerAggregate objects
    and send requests to CloudmanagerAggregate via
    the restApi
    '''

    def __init__(self):
        '''
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        '''
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            working_environment_name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            number_of_disks=dict(required=False, type='int'),
            disk_size_size=dict(required=False, type='int'),
            disk_size_unit=dict(required=False, choices=['GB', 'TB'], default='TB'),
            home_node=dict(required=False, type='str'),
            provider_volume_type=dict(required=False, type='str'),
            capacity_tier=dict(required=False, choices=['NONE', 'S3', 'Blob', 'cloudStorage'], type='str'),
            iops=dict(required=False, type='int'),
            throughput=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['refresh_token', 'sa_client_id'],
                ['working_environment_name', 'working_environment_id'],
            ],
            required_together=[['sa_client_id', 'sa_secret_key']],
            required_if=[
                # gp3 volumes need both iops and throughput; io1 needs iops only
                ['provider_volume_type', 'gp3', ['iops', 'throughput']],
                ['provider_volume_type', 'io1', ['iops']],
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = None
        # route every request through the given Cloud Manager connector
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_aggregate(self):
        '''
        Get aggregate details

        Resolves the working environment (by id or by name), caches its
        publicId into self.parameters['working_environment_id'] and sets
        the provider-specific API root path, then returns the aggregate
        whose name matches self.parameters['name'], or None if absent.
        '''
        working_environment_detail = None
        if 'working_environment_id' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        elif 'working_environment_name' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                       self.headers,
                                                                                                       self.parameters['working_environment_name'])
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        else:
            self.module.fail_json(msg="Error: Missing working environment information")
        if working_environment_detail is not None:
            self.parameters['working_environment_id'] = working_environment_detail['publicId']
            self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
        api_root_path = self.rest_api.api_root_path

        # the aggregate listing endpoint differs between AWS and the other providers
        if working_environment_detail['cloudProviderName'] != "Amazon":
            api = '%s/aggregates/%s' % (api_root_path, working_environment_detail['publicId'])
        else:
            api = '%s/aggregates?workingEnvironmentId=%s' % (api_root_path, working_environment_detail['publicId'])
        response, error, dummy = self.rest_api.get(api, header=self.headers)
        if error:
            self.module.fail_json(msg="Error: Failed to get aggregate list: %s, %s" % (str(error), str(response)))
        for aggr in response:
            if aggr['name'] == self.parameters['name']:
                return aggr
        return None

    def create_aggregate(self):
        '''
        Create aggregate

        Builds the request body from required parameters plus whichever
        optional ones are set, and fails the module on an API error.
        '''
        api = '%s/aggregates' % self.rest_api.api_root_path
        # check if all the required parameters exist
        body = {
            'name': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': self.parameters['number_of_disks'],
            'diskSize': {'size': self.parameters['disk_size_size'],
                         'unit': self.parameters['disk_size_unit']},
        }
        # optional parameters
        if 'home_node' in self.parameters:
            body['homeNode'] = self.parameters['home_node']
        if 'provider_volume_type' in self.parameters:
            body['providerVolumeType'] = self.parameters['provider_volume_type']
        # the sentinel value NONE means "do not send capacityTier at all"
        if 'capacity_tier' in self.parameters and self.parameters['capacity_tier'] != "NONE":
            body['capacityTier'] = self.parameters['capacity_tier']
        if 'iops' in self.parameters:
            body['iops'] = self.parameters['iops']
        if 'throughput' in self.parameters:
            body['throughput'] = self.parameters['throughput']
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate creation: %s, %s" % (str(error), str(response)))

    def update_aggregate(self, add_number_of_disks):
        '''
        Update aggregate with aggregate name and the parameters number_of_disks will be added

        Only growing an aggregate (adding disks) is supported by the API.
        '''
        api = '%s/aggregates/%s/%s/disks' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                             self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': add_number_of_disks
        }
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate adding disks: %s, %s" % (str(error), str(response)))

    def delete_aggregate(self):
        '''
        Delete aggregate with aggregate name
        '''
        api = '%s/aggregates/%s/%s' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                       self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
        }
        response, error, dummy = self.rest_api.delete(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate deletion: %s, %s" % (str(error), str(response)))

    def apply(self):
        '''
        Check, process and initiate aggregate operation

        Idempotent entry point: determines whether a create, delete or
        disk-add (modify) is needed and performs it unless check_mode.
        '''
        # check if aggregate exists
        current = self.get_aggregate()
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed:
            action = cd_action + "_aggregate"
            have_all_required, missed_params = self.na_helper.have_required_parameters(action)
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on %s" % (str(missed_params), action))
        add_disks = 0
        if current and self.parameters['state'] != 'absent':
            have_all_required, missed_params = self.na_helper.have_required_parameters("update_aggregate")
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on update_aggregate" % str(missed_params))
            # disks can only be added, never removed
            if len(current['disks']) < self.parameters['number_of_disks']:
                add_disks = self.parameters['number_of_disks'] - len(current['disks'])
                self.na_helper.changed = True
            elif len(current['disks']) > self.parameters['number_of_disks']:
                self.module.fail_json(msg="Error: Only add disk support. number_of_disks cannot be reduced")

        result_message = ""
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                self.create_aggregate()
                result_message = "Aggregate Created"
            elif cd_action == "delete":
                self.delete_aggregate()
                result_message = "Aggregate Deleted"
            else:  # modify
                self.update_aggregate(add_disks)
                result_message = "Aggregate Updated"
        self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
+
+
def main():
    '''
    Create NetAppCloudmanagerAggregate class instance and invoke apply

    :return: None
    '''
    na_cloudmanager_aggregate = NetAppCloudmanagerAggregate()
    na_cloudmanager_aggregate.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py
new file mode 100644
index 000000000..8e757b989
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_aws_fsx.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_aws_fsx
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_aws_fsx
+short_description: Cloud ONTAP file system(FSx) in AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.13.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete CVO/Working Environment for AWS FSx.
+
+options:
+
+ state:
+ description:
+ - Whether the specified FSx in AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the CVO/Working Environment for AWS FSx to manage.
+ type: str
+
+ region:
+ description:
+ - The region where the working environment will be created.
+ type: str
+
+ aws_credentials_name:
+ description:
+ - The name of the AWS Credentials account name.
+ type: str
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace of working environment.
+ type: str
+
+ tenant_id:
+ required: true
+ description:
+ - The NetApp account ID that the File System will be associated with.
+ type: str
+
+ working_environment_id:
+ description:
+ - The ID of the AWS FSx working environment used for delete.
+ type: str
+
+ storage_capacity_size:
+ description:
+ - volume size for the first data aggregate.
+ - For GB, the value can be [100 or 500].
+ - For TB, the value can be [1,2,4,8,16].
+ type: int
+
+ storage_capacity_size_unit:
+ description:
+ - The unit for volume size.
+ choices: ['GiB', 'TiB']
+ type: str
+
+ fsx_admin_password:
+ description:
+ - The admin password for Cloud Volumes ONTAP fsxadmin user.
+ type: str
+
+ throughput_capacity:
+ description:
+ - The capacity of the throughput.
+ choices: [512, 1024, 2048]
+ type: int
+
+ security_group_ids:
+ description:
+ - The IDs of the security groups for the working environment, multiple security groups can be provided separated by ','.
+ type: list
+ elements: str
+
+ kms_key_id:
+ description:
+ - AWS encryption parameters. It is required if using aws encryption.
+ type: str
+
+ tags:
+ description:
+ - Additional tags for the FSx AWS working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+ primary_subnet_id:
+ description:
+ - The subnet ID of the first node.
+ type: str
+
+ secondary_subnet_id:
+ description:
+ - The subnet ID of the second node.
+ type: str
+
+ route_table_ids:
+ description:
+ - The list of route table IDs that will be updated with the floating IPs.
+ type: list
+ elements: str
+
+ minimum_ssd_iops:
+ description:
+ - Provisioned SSD IOPS.
+ type: int
+
+ endpoint_ip_address_range:
+ description:
+ - The endpoint IP address range.
+ type: str
+
+ import_file_system:
+ description:
+    - Boolean option to import an existing AWS file system into Cloud Manager.
+ type: bool
+ default: false
+ version_added: 21.17.0
+
+ file_system_id:
+ description:
+ - The AWS file system ID to import to CloudManager. Required when import_file_system is 'True'
+ type: str
+ version_added: 21.17.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp AWS FSx
+ netapp.cloudmanager.na_cloudmanager_aws_fsx:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: fsxAnsible
+ region: us-east-2
+ workspace_id: workspace-xxxxx
+ tenant_id: account-xxxxx
+ storage_capacity_size: 1024
+ storage_capacity_size_unit: TiB
+ aws_credentials_name: xxxxxxx
+ primary_subnet_id: subnet-xxxxxx
+ secondary_subnet_id: subnet-xxxxx
+ throughput_capacity: 512
+ fsx_admin_password: xxxxxxx
+ tags: [
+ {tag_key: abcd,
+ tag_value: ABCD}]
+
+- name: Import AWS FSX
+ na_cloudmanager_aws_fsx:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: fsxAnsible
+ region: us-west-2
+ workspace_id: workspace-xxxxx
+ import_file_system: True
+ file_system_id: "{{ xxxxxxxxxxxxxxx }}"
+ tenant_id: account-xxxxx
+ aws_credentials_name: xxxxxxx
+
+- name: Delete NetApp AWS FSx
+ netapp.cloudmanager.na_cloudmanager_aws_fsx:
+ state: absent
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ working_environment_id: fs-xxxxxx
+ name: fsxAnsible
+ tenant_id: account-xxxxx
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AWS FSx working_environment_id.
+ type: str
+ returned: success
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudManagerAWSFSX:
    ''' object initialize and class methods '''

    def __init__(self):
        '''
        Parse arguments, set up the REST client, and resolve the AWS
        credentials id when the desired state is present.
        '''
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            region=dict(required=False, type='str'),
            aws_credentials_name=dict(required=False, type='str'),
            workspace_id=dict(required=False, type='str'),
            tenant_id=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            storage_capacity_size=dict(required=False, type='int'),
            storage_capacity_size_unit=dict(required=False, type='str', choices=['GiB', 'TiB']),
            fsx_admin_password=dict(required=False, type='str', no_log=True),
            throughput_capacity=dict(required=False, type='int', choices=[512, 1024, 2048]),
            security_group_ids=dict(required=False, type='list', elements='str'),
            kms_key_id=dict(required=False, type='str', no_log=True),
            tags=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            primary_subnet_id=dict(required=False, type='str'),
            secondary_subnet_id=dict(required=False, type='str'),
            route_table_ids=dict(required=False, type='list', elements='str'),
            minimum_ssd_iops=dict(required=False, type='int'),
            endpoint_ip_address_range=dict(required=False, type='str'),
            import_file_system=dict(required=False, type='bool', default=False),
            file_system_id=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'present', ['region', 'aws_credentials_name', 'workspace_id', 'fsx_admin_password', 'throughput_capacity',
                                      'primary_subnet_id', 'secondary_subnet_id', 'storage_capacity_size', 'storage_capacity_size_unit']],
                ['import_file_system', True, ['file_system_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key'], ['storage_capacity_size', 'storage_capacity_size_unit']],
            supports_check_mode=True,
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.headers = None
        if self.rest_api.simulator:
            self.headers = {
                'x-simulator': 'true'
            }
        # credentials are only needed for create/import, not for delete
        if self.parameters['state'] == 'present':
            self.aws_credentials_id, error = self.get_aws_credentials_id()
            if error is not None:
                self.module.fail_json(msg=str(error))

    def get_aws_credentials_id(self):
        """
        Get aws_credentials_id

        Looks up the credentials id matching aws_credentials_name for the
        tenant.
        :return: (id, None) on success, (response_or_None, error) otherwise
        """
        api = "/fsx-ontap/aws-credentials/"
        api += self.parameters['tenant_id']
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            return response, "Error: getting aws_credentials_id %s" % error
        for each in response:
            if each['name'] == self.parameters['aws_credentials_name']:
                return each['id'], None
        return None, "Error: aws_credentials_name not found"

    def discover_aws_fsx(self):
        """
        discover aws_fsx

        Verifies that file_system_id is discoverable with the given
        credentials/workspace/region.
        :return: None on success, an error message string otherwise
        """
        api = "/fsx-ontap/working-environments/%s/discover?credentials-id=%s&workspace-id=%s&region=%s"\
              % (self.parameters['tenant_id'], self.aws_credentials_id, self.parameters['workspace_id'], self.parameters['region'])
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            return "Error: discovering aws_fsx %s" % error
        id_found = False
        for each in response:
            if each['id'] == self.parameters['file_system_id']:
                id_found = True
                break
        if not id_found:
            return "Error: file_system_id provided could not be found"
        # implicit None return signals success

    def recover_aws_fsx(self):
        """
        recover aws_fsx

        Imports (recovers) an existing AWS file system into Cloud Manager.
        Fails the module on an API error.
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "fileSystemId": self.parameters['file_system_id'],
                }
        api_url = "/fsx-ontap/working-environments/%s/recover" % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on recovering AWS FSx: %s, %s" % (error, response))

    def create_aws_fsx(self):
        """ Create AWS FSx

        Posts the creation request, then waits (up to 30 x 30s) for the
        file system to report ON/AVAILABLE.
        :return: the new working environment id
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "throughputCapacity": self.parameters['throughput_capacity'],
                "storageCapacity": {
                    "size": self.parameters['storage_capacity_size'],
                    "unit": self.parameters['storage_capacity_size_unit']},
                "fsxAdminPassword": self.parameters['fsx_admin_password'],
                "primarySubnetId": self.parameters['primary_subnet_id'],
                "secondarySubnetId": self.parameters['secondary_subnet_id'],
                }

        # optional request fields, only sent when the user supplied them
        if self.parameters.get('tags') is not None:
            tags = []
            for each_tag in self.parameters['tags']:
                tag = {
                    'key': each_tag['tag_key'],
                    'value': each_tag['tag_value']
                }

                tags.append(tag)
            json.update({"tags": tags})

        if self.parameters.get('security_group_ids'):
            json.update({"securityGroupIds": self.parameters['security_group_ids']})

        if self.parameters.get('route_table_ids'):
            json.update({"routeTableIds": self.parameters['route_table_ids']})

        if self.parameters.get('kms_key_id'):
            json.update({"kmsKeyId": self.parameters['kms_key_id']})

        if self.parameters.get('minimum_ssd_iops'):
            json.update({"minimumSsdIops": self.parameters['minimum_ssd_iops']})

        if self.parameters.get('endpoint_ip_address_range'):
            json.update({"endpointIpAddressRange": self.parameters['endpoint_ip_address_range']})

        api_url = '/fsx-ontap/working-environments/%s' % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating AWS FSx: %s, %s" % (str(error), str(response)))
        working_environment_id = response['id']
        creation_wait_time = 30
        creation_retry_count = 30
        wait_on_completion_api_url = '/fsx-ontap/working-environments/%s/%s?provider-details=true' % (self.parameters['tenant_id'], working_environment_id)

        err = self.wait_on_completion_for_fsx(wait_on_completion_api_url, "AWS_FSX", "create", creation_retry_count, creation_wait_time)

        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating AWS FSX: %s" % str(err))

        return working_environment_id

    def wait_on_completion_for_fsx(self, api_url, action_name, task, retries, wait_interval):
        """Poll until the FSx file system is ON/AVAILABLE, FAILED, or retries run out.

        :return: None on success, an error message string otherwise
        """
        while True:
            fsx_status, error = self.check_task_status_for_fsx(api_url)
            if error is not None:
                return error
            if fsx_status['status']['status'] == "ON" and fsx_status['status']['lifecycle'] == "AVAILABLE":
                return None
            elif fsx_status['status']['status'] == "FAILED":
                return 'Failed to %s %s' % (task, action_name)
            if retries == 0:
                return 'Taking too long for %s to %s or not properly setup' % (action_name, task)
            time.sleep(wait_interval)
            retries = retries - 1

    def check_task_status_for_fsx(self, api_url):
        """Fetch provider details, retrying transient errors with exponential backoff.

        :return: (providerDetails, None) on success.
        NOTE(review): on persistent failure this returns (0, error) — an
        int, not None, as the first element; callers only inspect the
        error, so this is harmless but inconsistent.
        """
        network_retries = 3
        exponential_retry_time = 1
        while True:
            result, error, dummy = self.rest_api.get(api_url, None, header=self.headers)
            if error is not None:
                if network_retries > 0:
                    time.sleep(exponential_retry_time)
                    exponential_retry_time *= 2
                    network_retries = network_retries - 1
                else:
                    return 0, error
            else:
                response = result
                break
        return response['providerDetails'], None

    def delete_aws_fsx(self, id, tenant_id):
        """
        Delete AWS FSx

        NOTE(review): parameter `id` shadows the builtin of the same name.
        Fails the module on an API error.
        """
        api_url = '/fsx-ontap/working-environments/%s/%s' % (tenant_id, id)
        response, error, dummy = self.rest_api.delete(api_url, None, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on deleting AWS FSx: %s, %s" % (str(error), str(response)))

    def apply(self):
        """
        Apply action to the AWS FSx working Environment

        Determines create/import/delete from current state and the
        import_file_system flag, and performs it unless check_mode.
        :return: None
        """
        working_environment_id = None
        current, error = self.na_helper.get_aws_fsx_details(self.rest_api, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on fetching AWS FSx: %s" % str(error))
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # a "create" with import_file_system becomes an import of an existing file system
        if self.parameters['import_file_system'] and cd_action == "create":
            error = self.discover_aws_fsx()
            if error is not None:
                self.module.fail_json(msg="Error: unexpected response on discovering AWS FSx: %s" % str(error))
            cd_action = "import"
            self.na_helper.changed = True

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "import":
                self.recover_aws_fsx()
                working_environment_id = self.parameters['file_system_id']
            elif cd_action == "create":
                working_environment_id = self.create_aws_fsx()
            elif cd_action == "delete":
                self.delete_aws_fsx(current['id'], self.parameters['tenant_id'])

        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """
    Create AWS FSx class instance and invoke apply

    :return: None
    """
    obj_store = NetAppCloudManagerAWSFSX()
    obj_store.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py
new file mode 100644
index 000000000..89e10a81b
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cifs_server.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cifs_server
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cifs_server
+short_description: NetApp Cloud Manager cifs server
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or Delete a CIFS server on the Cloud Volume ONTAP system to support CIFS volumes, based on an Active Directory or Workgroup.
+
+options:
+ state:
+ description:
+ - Whether the specified cifs server should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the cifs server will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the cifs server will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - The active directory domain name. For CIFS AD only.
+ type: str
+
+ dns_domain:
+ description:
+ - The DNS domain name. For CIFS AD only.
+ type: str
+
+ username:
+ description:
+ - The active directory admin user name. For CIFS AD only.
+ type: str
+
+ password:
+ description:
+ - The active directory admin password. For CIFS AD only.
+ type: str
+
+ ip_addresses:
+ description:
+ - The DNS server IP addresses. For CIFS AD only.
+ type: list
+ elements: str
+
+ netbios:
+ description:
+ - The CIFS server NetBIOS name. For CIFS AD only.
+ type: str
+
+ organizational_unit:
+ description:
+ - The organizational unit in which to register the CIFS server. For CIFS AD only.
+ type: str
+
+ is_workgroup:
+ description:
+ - For CIFS workgroup operations, set to true.
+ type: bool
+
+ server_name:
+ description:
+ - The server name. For CIFS workgroup only.
+ type: str
+
+ workgroup_name:
+ description:
+ - The workgroup name. For CIFS workgroup only.
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create cifs server with working_environment_id
+ netapp.cloudmanager.na_cloudmanager_cifs_server:
+ state: present
+ working_environment_id: VsaWorkingEnvironment-abcdefgh
+ client_id: your_client_id
+ refresh_token: your_refresh_token
+ domain: example.com
+ username: admin
+ password: pass
+ dns_domain: example.com
+ ip_addresses: ["1.0.0.0"]
+ netbios: cvoname
+ organizational_unit: CN=Computers
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppCloudmanagerCifsServer:
+    """Create or delete a CIFS server (Active Directory or workgroup based)
+    on a Cloud Volumes ONTAP working environment via Cloud Manager REST APIs."""
+
+    def __init__(self):
+        """
+        Parse arguments, setup state variables,
+        check parameters and ensure request module is installed.
+
+        Also resolves the working environment (by ID or by name), caches its
+        publicId, and sets the REST API root path for subsequent calls.
+        """
+        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+        self.argument_spec.update(dict(
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            working_environment_id=dict(required=False, type='str'),
+            working_environment_name=dict(required=False, type='str'),
+            client_id=dict(required=True, type='str'),
+            domain=dict(required=False, type='str'),
+            dns_domain=dict(required=False, type='str'),
+            username=dict(required=False, type='str'),
+            password=dict(required=False, type='str', no_log=True),
+            ip_addresses=dict(required=False, type='list', elements='str'),
+            netbios=dict(required=False, type='str'),
+            organizational_unit=dict(required=False, type='str'),
+            is_workgroup=dict(required=False, type='bool'),
+            server_name=dict(required=False, type='str'),
+            workgroup_name=dict(required=False, type='str'),
+        ))
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            required_one_of=[
+                ['refresh_token', 'sa_client_id'],
+                ['working_environment_name', 'working_environment_id'],
+            ],
+            required_together=[['sa_client_id', 'sa_secret_key']],
+            # AD-only options (domain, credentials, DNS, netbios, OU) are mutually
+            # exclusive with the workgroup-only options (server_name, workgroup_name).
+            mutually_exclusive=[
+                ('domain', 'server_name'),
+                ('dns_domain', 'server_name'),
+                ('username', 'server_name'),
+                ('password', 'server_name'),
+                ('ip_addresses', 'server_name'),
+                ('netbios', 'server_name'),
+                ('organizational_unit', 'server_name'),
+                ('domain', 'workgroup_name'),
+                ('dns_domain', 'workgroup_name'),
+                ('username', 'workgroup_name'),
+                ('password', 'workgroup_name'),
+                ('ip_addresses', 'workgroup_name'),
+                ('netbios', 'workgroup_name'),
+                ('organizational_unit', 'workgroup_name'),
+            ],
+            supports_check_mode=True
+        )
+        self.na_helper = NetAppModule()
+        # set up state variables
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+        # Calling generic rest_api class
+        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
+        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
+        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+        # All requests are routed through the given connector (agent).
+        self.headers = {
+            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+        }
+        if self.parameters.get('working_environment_id'):
+            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
+        else:
+            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
+                                                                                                       self.headers,
+                                                                                                       self.parameters['working_environment_name'])
+        if working_environment_detail is not None:
+            self.parameters['working_environment_id'] = working_environment_detail['publicId']
+        else:
+            self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
+        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
+
+    def get_cifs_server(self):
+        """
+        Fetch the CIFS server configured on the working environment.
+
+        :return: dict of the current CIFS settings (module parameter names as
+                 keys), or None when no CIFS server exists.
+        """
+        response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s/cifs" % (
+            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error on get_cifs_server: %s, %s" % (str(err), str(response)))
+        current_cifs = dict()
+        if response is None or len(response) == 0:
+            return None
+        # only one cifs server exists per working environment.
+        # Map REST field names back to module parameter names.
+        for server in response:
+            if server.get('activeDirectoryDomain'):
+                current_cifs['domain'] = server['activeDirectoryDomain']
+            if server.get('dnsDomain'):
+                current_cifs['dns_domain'] = server['dnsDomain']
+            if server.get('ipAddresses'):
+                current_cifs['ip_addresses'] = server['ipAddresses']
+            if server.get('organizationalUnit'):
+                current_cifs['organizational_unit'] = server['organizationalUnit']
+            if server.get('netBIOS'):
+                current_cifs['netbios'] = server['netBIOS']
+        return current_cifs
+
+    def create_cifs_server(self):
+        """
+        Create a CIFS server on the working environment.
+
+        AD parameters are renamed to their REST equivalents by hand (they do
+        not follow the generic snake_case -> camelCase conversion); workgroup
+        mode posts to the "...cifs-workgroup" endpoint instead.
+        """
+        exclude_list = ['client_id', 'domain', 'netbios', 'username', 'password']
+        server = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
+        if self.parameters.get('domain'):
+            server['activeDirectoryDomain'] = self.parameters['domain']
+        if self.parameters.get('netbios'):
+            server['netBIOS'] = self.parameters['netbios']
+        if self.parameters.get('username'):
+            server['activeDirectoryUsername'] = self.parameters['username']
+        if self.parameters.get('password'):
+            server['activeDirectoryPassword'] = self.parameters['password']
+        url = "%s/working-environments/%s/cifs" % (self.rest_api.api_root_path,
+                                                   self.parameters['working_environment_id'])
+        if self.parameters.get('is_workgroup'):
+            url = url + "-workgroup"
+
+        response, err, dummy = self.rest_api.send_request("POST", url, None, server, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error on create_cifs_server failed: %s, %s" % (str(err), str(response)))
+
+    def delete_cifs_server(self):
+        """Delete the CIFS server from the working environment (POST with empty body)."""
+        response, err, dummy = self.rest_api.send_request("POST", "%s/working-environments/%s/delete-cifs" % (
+            self.rest_api.api_root_path, self.parameters['working_environment_id']), None, {}, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error on delete_cifs_server: %s, %s" % (str(err), str(response)))
+
+    def apply(self):
+        """
+        Apply the requested state: create or delete the CIFS server.
+        Modifying an existing CIFS server is not handled here; only
+        presence/absence drives the change. Honors check_mode.
+        """
+        current = self.get_cifs_server()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_cifs_server()
+            elif cd_action == 'delete':
+                self.delete_cifs_server()
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    '''Main Function: instantiate the CIFS server module object and apply.'''
+    server = NetAppCloudmanagerCifsServer()
+    server.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py
new file mode 100644
index 000000000..b1a22829e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_aws.py
@@ -0,0 +1,655 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_aws
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_aws
+short_description: NetApp Cloud Manager connector for AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete Cloud Manager connector for AWS.
+ - This module requires to be authenticated with AWS. This can be done with C(aws configure).
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for AWS to manage.
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance (for example, t3.xlarge). At least 4 CPU and 16 GB of memory are required.
+ type: str
+ default: t3.xlarge
+
+ key_name:
+ description:
+ - The name of the key pair to use for the Connector instance.
+ type: str
+
+ subnet_id:
+ description:
+ - The ID of the subnet for the instance.
+ type: str
+
+ region:
+ required: true
+ description:
+ - The region where the Cloud Manager Connector will be created.
+ type: str
+
+ instance_id:
+ description:
+ - The ID of the EC2 instance used for delete.
+ type: str
+
+ client_id:
+ description:
+ - The unique client ID of the Connector.
+ - The connector ID.
+ type: str
+
+ ami:
+ description:
+ - The image ID.
+ type: str
+
+ company:
+ description:
+ - The name of the company of the user.
+ type: str
+
+ security_group_ids:
+ description:
+ - The IDs of the security groups for the instance, multiple security groups can be provided separated by ','.
+ type: list
+ elements: str
+
+ iam_instance_profile_name:
+ description:
+ - The name of the instance profile for the Connector.
+ type: str
+
+ enable_termination_protection:
+ description:
+ - Indicates whether to enable termination protection on the instance.
+ type: bool
+ default: false
+
+ associate_public_ip_address:
+ description:
+ - Indicates whether to associate a public IP address to the instance. If not provided, the association will be done based on the subnet's configuration.
+ type: bool
+ default: true
+
+ account_id:
+ description:
+ - The NetApp tenancy account ID.
+ type: str
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates, a list of certificate file names.
+ type: list
+ elements: str
+ version_added: 21.5.0
+
+ aws_tag:
+ description:
+ - Additional tags for the AWS EC2 instance.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for AWS
+ netapp.cloudmanager.na_cloudmanager_connector_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: bsuhas_ansible_occm
+ region: us-west-1
+ key_name: dev_automation
+ subnet_id: subnet-xxxxx
+ security_group_ids: [sg-xxxxxxxxxxx]
+ iam_instance_profile_name: OCCM_AUTOMATION
+ account_id: "{{ account-xxxxxxx }}"
+ company: NetApp
+ proxy_url: abc.com
+ proxy_user_name: xyz
+ proxy_password: abcxyz
+ proxy_certificates: [abc.crt.txt, xyz.crt.txt]
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: Delete NetApp Cloud Manager connector for AWS
+ netapp.cloudmanager.na_cloudmanager_connector_aws:
+ state: absent
+ name: ansible
+ region: us-west-1
+ account_id: "{{ account-xxxxxxx }}"
+ instance_id: i-xxxxxxxxxxxxx
+ client_id: xxxxxxxxxxxxxxxxxxx
+"""
+
+RETURN = """
+ids:
+ description: Newly created AWS client ID in cloud manager, instance ID and account ID.
+ type: dict
+ returned: success
+"""
+
+import traceback
+import uuid
+import time
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+IMPORT_EXCEPTION = None
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_AWS_LIB = True
+except ImportError as exc:
+ HAS_AWS_LIB = False
+ IMPORT_EXCEPTION = exc
+
+UUID = str(uuid.uuid4())
+
+
+class NetAppCloudManagerConnectorAWS(object):
+    ''' object initialize and class methods
+
+    Creates or deletes a Cloud Manager connector (OCCM agent) running on an
+    AWS EC2 instance. AWS calls go through boto3 (credentials come from the
+    standard AWS configuration chain); Cloud Manager calls go through the
+    collection's CloudManagerRestAPI helper.
+    '''
+
+    def __init__(self):
+        """Set up the argument spec, validate parameters, and build REST client."""
+        self.use_rest = False
+        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+        self.argument_spec.update(dict(
+            name=dict(required=True, type='str'),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            instance_type=dict(required=False, type='str', default='t3.xlarge'),
+            key_name=dict(required=False, type='str'),
+            subnet_id=dict(required=False, type='str'),
+            region=dict(required=True, type='str'),
+            instance_id=dict(required=False, type='str'),
+            client_id=dict(required=False, type='str'),
+            ami=dict(required=False, type='str'),
+            company=dict(required=False, type='str'),
+            security_group_ids=dict(required=False, type='list', elements='str'),
+            iam_instance_profile_name=dict(required=False, type='str'),
+            enable_termination_protection=dict(required=False, type='bool', default=False),
+            associate_public_ip_address=dict(required=False, type='bool', default=True),
+            account_id=dict(required=False, type='str'),
+            proxy_url=dict(required=False, type='str'),
+            proxy_user_name=dict(required=False, type='str'),
+            proxy_password=dict(required=False, type='str', no_log=True),
+            proxy_certificates=dict(required=False, type='list', elements='str'),
+            aws_tag=dict(required=False, type='list', elements='dict', options=dict(
+                tag_key=dict(type='str', no_log=False),
+                tag_value=dict(type='str')
+            )),
+        ))
+
+        self.module = AnsibleModule(
+            argument_spec=self.argument_spec,
+            # These options are only needed when creating the connector.
+            required_if=[
+                ['state', 'present', ['company', 'iam_instance_profile_name', 'key_name', 'security_group_ids', 'subnet_id']],
+            ],
+            required_one_of=[['refresh_token', 'sa_client_id']],
+            required_together=[['sa_client_id', 'sa_secret_key']],
+            supports_check_mode=True
+        )
+
+        # Fail early with the captured import error if boto3/botocore are missing.
+        if HAS_AWS_LIB is False:
+            self.module.fail_json(msg="the python AWS packages boto3 and botocore are required. Command is pip install boto3."
+                                      "Import error: %s" % str(IMPORT_EXCEPTION))
+
+        self.na_helper = NetAppModule()
+        self.parameters = self.na_helper.set_parameters(self.module.params)
+
+        self.rest_api = CloudManagerRestAPI(self.module)
+
+    def get_instance(self):
+        """
+        Get Cloud Manager connector for AWS
+        :return:
+            Dictionary of current details if Cloud Manager connector for AWS
+            None if Cloud Manager connector for AWS is not found
+        """
+
+        response = None
+        client = boto3.client('ec2', region_name=self.parameters['region'])
+        # Connector instances are identified by their Name tag plus the
+        # OCCMInstance marker tag set at creation time.
+        filters = [{'Name': 'tag:Name', 'Values': [self.parameters['name']]},
+                   {'Name': 'tag:OCCMInstance', 'Values': ['true']}]
+
+        # A known instance_id takes precedence over tag-based lookup.
+        kwargs = {'Filters': filters} if self.parameters.get('instance_id') is None else {'InstanceIds': [self.parameters['instance_id']]}
+
+        try:
+            response = client.describe_instances(**kwargs)
+
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        if len(response['Reservations']) == 0:
+            return None
+
+        # Terminated instances may linger in describe results; ignore them.
+        actives = [instance for reservation in response['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != 'terminated']
+        if len(actives) == 1:
+            return actives[0]
+        if not actives:
+            return None
+        # More than one live instance with the same name is ambiguous; abort.
+        self.module.fail_json(msg="Error: found multiple instances for name=%s: %s" % (self.parameters['name'], str(actives)))
+
+    def get_ami(self):
+        """
+        Get AWS EC2 Image
+        :return:
+            Latest AMI
+        """
+
+        instance_ami = None
+        client = boto3.client('ec2', region_name=self.parameters['region'])
+
+        try:
+            # AMI name filter and owning account come from the Cloud Manager
+            # environment data (production vs stage endpoints).
+            instance_ami = client.describe_images(
+                Filters=[
+                    {
+                        'Name': 'name',
+                        'Values': [
+                            self.rest_api.environment_data['AMI_FILTER'],
+                        ]
+                    },
+                ],
+                Owners=[
+                    self.rest_api.environment_data['AWS_ACCOUNT'],
+                ],
+            )
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        # Pick the most recently created image (CreationDate is ISO-8601,
+        # so lexicographic comparison orders chronologically).
+        latest_date = instance_ami['Images'][0]['CreationDate']
+        latest_ami = instance_ami['Images'][0]['ImageId']
+
+        for image in instance_ami['Images']:
+            if image['CreationDate'] > latest_date:
+                latest_date = image['CreationDate']
+                latest_ami = image['ImageId']
+
+        return latest_ami
+
+    def create_instance(self):
+        """
+        Create Cloud Manager connector for AWS.
+
+        Registers the agent with Cloud Manager first (to obtain user data and
+        a client ID), launches the EC2 instance, then polls until the OCCM
+        agent reports active.
+        :return: client_id, instance_id
+        """
+
+        if self.parameters.get('ami') is None:
+            self.parameters['ami'] = self.get_ami()
+
+        user_data, client_id = self.register_agent_to_service()
+
+        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
+
+        # Name + OCCMInstance tags let get_instance() find this connector later.
+        tags = [
+            {
+                'Key': 'Name',
+                'Value': self.parameters['name']
+            },
+            {
+                'Key': 'OCCMInstance',
+                'Value': 'true'
+            },
+        ]
+
+        if self.parameters.get('aws_tag') is not None:
+            for each_tag in self.parameters['aws_tag']:
+                tag = {
+                    'Key': each_tag['tag_key'],
+                    'Value': each_tag['tag_value']
+                }
+
+                tags.append(tag)
+
+        instance_input = {
+            'BlockDeviceMappings': [
+                {
+                    'DeviceName': '/dev/sda1',
+                    'Ebs': {
+                        'Encrypted': True,
+                        'VolumeSize': 100,
+                        'VolumeType': 'gp2',
+                    },
+                },
+            ],
+            'ImageId': self.parameters['ami'],
+            'MinCount': 1,
+            'MaxCount': 1,
+            'KeyName': self.parameters['key_name'],
+            'InstanceType': self.parameters['instance_type'],
+            'DisableApiTermination': self.parameters['enable_termination_protection'],
+            'TagSpecifications': [
+                {
+                    'ResourceType': 'instance',
+                    'Tags': tags
+                },
+            ],
+            'IamInstanceProfile': {
+                'Name': self.parameters['iam_instance_profile_name']
+            },
+            'UserData': user_data
+        }
+
+        # Associating a public IP requires the subnet/groups to be specified on
+        # the network interface rather than at the instance level.
+        if self.parameters.get('associate_public_ip_address') is True:
+            instance_input['NetworkInterfaces'] = [
+                {
+                    'AssociatePublicIpAddress': self.parameters['associate_public_ip_address'],
+                    'DeviceIndex': 0,
+                    'SubnetId': self.parameters['subnet_id'],
+                    'Groups': self.parameters['security_group_ids']
+                }
+            ]
+        else:
+            instance_input['SubnetId'] = self.parameters['subnet_id']
+            instance_input['SecurityGroupIds'] = self.parameters['security_group_ids']
+
+        try:
+            result = ec2.run_instances(**instance_input)
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        # Sleep for 2 minutes
+        time.sleep(120)
+        # Then poll up to 16 times, 30s apart (~8 more minutes), for the agent
+        # to come up as active.
+        retries = 16
+        while retries > 0:
+            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
+            if error is not None:
+                self.module.fail_json(
+                    msg="Error: not able to get occm status: %s, %s" % (str(error), str(agent)),
+                    client_id=client_id, instance_id=result['Instances'][0]['InstanceId'])
+            if agent['status'] == "active":
+                break
+            else:
+                time.sleep(30)
+            retries -= 1
+        if retries == 0:
+            # Taking too long for status to be active
+            return self.module.fail_json(msg="Error: taking too long for OCCM agent to be active or not properly setup")
+
+        return client_id, result['Instances'][0]['InstanceId']
+
+    def get_vpc(self):
+        """
+        Get vpc
+        :return: vpc ID (derived from the configured subnet)
+        """
+
+        vpc_result = None
+        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
+
+        vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}
+
+        try:
+            vpc_result = ec2.describe_subnets(**vpc_input)
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        return vpc_result['Subnets'][0]['VpcId']
+
+    def set_account_id(self):
+        """Resolve the NetApp tenancy account ID if not provided.
+        :return: error (or None on success); account_id is stored in parameters."""
+        if self.parameters.get('account_id') is None:
+            response, error = self.na_helper.get_or_create_account(self.rest_api)
+            if error is not None:
+                return error
+            self.parameters['account_id'] = response
+        return None
+
+    def register_agent_to_service(self):
+        """
+        Register agent to service and collect userdata by setting up connector
+        :return: UserData, ClientID
+        """
+
+        vpc = self.get_vpc()
+
+        if self.parameters.get('account_id') is None:
+            error = self.set_account_id()
+            if error is not None:
+                self.module.fail_json(msg="Error: failed to get account: %s." % str(error))
+
+        headers = {
+            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
+            "X-Service-Request-Id": "111"
+        }
+        body = {
+            "accountId": self.parameters['account_id'],
+            "name": self.parameters['name'],
+            "company": self.parameters['company'],
+            "placement": {
+                "provider": "AWS",
+                "region": self.parameters['region'],
+                "network": vpc,
+                "subnet": self.parameters['subnet_id'],
+            },
+            "extra": {
+                "proxy": {
+                    "proxyUrl": self.parameters.get('proxy_url'),
+                    "proxyUserName": self.parameters.get('proxy_user_name'),
+                    "proxyPassword": self.parameters.get('proxy_password')
+                }
+            }
+        }
+
+        register_api = '/agents-mgmt/connector-setup'
+        response, error, dummy = self.rest_api.post(register_api, body, header=headers)
+        if error is not None:
+            self.module.fail_json(msg="Error: unexpected response on connector setup: %s, %s" % (str(error), str(response)))
+        client_id = response['clientId']
+        client_secret = response['clientSecret']
+
+        # This structure becomes the EC2 user data consumed by the OCCM agent
+        # at first boot.
+        u_data = {
+            'instanceName': self.parameters['name'],
+            'company': self.parameters['company'],
+            'clientId': client_id,
+            'clientSecret': client_secret,
+            'systemId': UUID,
+            'tenancyAccountId': self.parameters['account_id'],
+            'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
+                              'proxyUserName': self.parameters.get('proxy_user_name'),
+                              'proxyUrl': self.parameters.get('proxy_url'),
+                              },
+            'localAgent': True
+        }
+
+        if self.parameters.get('proxy_certificates') is not None:
+            # Certificates are sent base64-encoded; fail on unreadable files.
+            proxy_certificates = []
+            for certificate_file in self.parameters['proxy_certificates']:
+                encoded_certificate, error = self.na_helper.encode_certificates(certificate_file)
+                if error:
+                    self.module.fail_json(msg="Error: could not open/read file '%s' of proxy_certificates: %s" % (certificate_file, error))
+                proxy_certificates.append(encoded_certificate)
+
+            if proxy_certificates:
+                u_data['proxySettings']['proxyCertificates'] = proxy_certificates
+
+        user_data = self.na_helper.convert_data_to_tabbed_jsonstring(u_data)
+
+        return user_data, client_id
+
+    def delete_instance(self):
+        """
+        Delete OCCM instance: terminate the EC2 instance and, when a client_id
+        is known, wait for the OCCM agent to leave 'active' state.
+        :return:
+            None on success, or an error string
+        """
+
+        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
+        try:
+            ec2.terminate_instances(
+                InstanceIds=[
+                    self.parameters['instance_id'],
+                ],
+            )
+        except ClientError as error:
+            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
+
+        # Without a client_id we cannot track the agent; consider the job done.
+        if 'client_id' not in self.parameters:
+            return None
+
+        # Poll up to 30 times, 10s apart (~5 minutes), for the agent to stop
+        # reporting 'active'.
+        retries = 30
+        while retries > 0:
+            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
+            if error is not None:
+                return "Error: not able to get occm agent status after deleting instance: %s, %s." % (str(error), str(agent))
+            if agent['status'] != "active":
+                break
+            else:
+                time.sleep(10)
+            retries -= 1
+        if retries == 0:
+            # Taking too long for terminating OCCM
+            return "Error: taking too long for instance to finish terminating."
+        return None
+
+    def get_occm_agents(self):
+        """Collect OCCM agents for this connector, by client_id if given,
+        otherwise by name within the tenancy account.
+        :return: list of agent dicts (possibly empty)"""
+        if 'client_id' in self.parameters:
+            agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
+            if str(error) == '403' and 'Action not allowed for user' in str(agent):
+                # assume the agent does not exist anymore
+                agents, error = [], None
+                self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
+            else:
+                agents = [agent]
+        else:
+            # NOTE(review): the error returned by set_account_id() is ignored
+            # here; the 'account_id' presence check below covers the failure.
+            self.set_account_id()
+            if 'account_id' in self.parameters:
+                agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
+                                                                       self.parameters['name'], 'AWS')
+            else:
+                self.module.warn('Without account_id, some agents may still exist.')
+                agents, error = [], None
+        if error:
+            self.module.fail_json(
+                msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
+        return agents
+
+    def set_client_id(self):
+        """Derive the client_id from the agents list when not supplied;
+        only an unambiguous single active agent is auto-selected.
+        :return: (client_id or None, agents list)"""
+        agents = self.get_occm_agents()
+        client_id = self.parameters.get('client_id')
+        if client_id is None:
+            active_client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent and agent['status'] == 'active']
+            if len(active_client_ids) == 1:
+                client_id = active_client_ids[0]
+                self.parameters['client_id'] = client_id
+        return client_id, agents
+
+    def delete_occm_agents(self, agents):
+        """Delete the given OCCM agents from Cloud Manager.
+        :return: None on success, or an error string"""
+        error = self.na_helper.delete_occm_agents(self.rest_api, agents)
+        if error:
+            return "Error: deleting OCCM agent(s): %s" % error
+        return None
+
+    def apply(self):
+        """
+        Apply action to the Cloud Manager connector for AWS
+        :return: None (calls module.exit_json with account/client/instance IDs)
+        """
+        results = {
+            'account_id': None,
+            'client_id': None,
+            'instance_id': None
+        }
+        agents = None
+        current = self.get_instance()
+        if current or self.parameters['state'] == 'absent':
+            if self.parameters.get('instance_id') is None and current:
+                self.parameters['instance_id'] = current['InstanceId']
+            results['instance_id'] = self.parameters.get('instance_id')
+            results['client_id'], agents = self.set_client_id()
+            if current is None and agents:
+                # it's possible the VM instance does not exist, but the clients are still present.
+                current = agents
+
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if cd_action is None and self.parameters['state'] == 'present':
+            results['modify'] = 'Note: modifying an existing connector is not supported at this time.'
+
+        if not self.module.check_mode and self.na_helper.changed:
+            if cd_action == 'create':
+                results['client_id'], results['instance_id'] = self.create_instance()
+            elif cd_action == 'delete':
+                # Collect errors from both cleanup steps and report them together.
+                errors = []
+                if self.parameters.get('instance_id'):
+                    errors.append(self.delete_instance())
+                if agents:
+                    errors.append(self.delete_occm_agents(agents))
+                errors = [error for error in errors if error]
+                if errors:
+                    self.module.fail_json(msg='Errors deleting instance or client: %s' % ', '.join(errors))
+
+        results['account_id'] = self.parameters.get('account_id')
+        results['changed'] = self.na_helper.changed
+        self.module.exit_json(**results)
+
+
+def main():
+    """
+    Module entry point: create Cloud Manager connector for AWS class instance and invoke apply.
+    :return: None
+    """
+    obj_store = NetAppCloudManagerConnectorAWS()
+    obj_store.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py
new file mode 100644
index 000000000..6f1d30a32
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_azure.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_azure
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_azure
+short_description: NetApp Cloud Manager connector for Azure.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or delete Cloud Manager connector for Azure.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for Azure should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for Azure to manage.
+ type: str
+
+ virtual_machine_size:
+ description:
+ - The virtual machine type. (for example, Standard_DS3_v2).
+ - At least 4 CPU and 16 GB of memory are required.
+ type: str
+ default: Standard_DS3_v2
+
+ resource_group:
+ required: true
+ description:
+ - The resource group in Azure where the resources will be created.
+ type: str
+
+ subnet_name:
+ required: true
+ description:
+ - The name of the subnet for the virtual machine.
+ - For example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/xxx/subnets/default,
+ only default is needed.
+ aliases:
+ - subnet_id
+ type: str
+ version_added: '21.7.0'
+
+ location:
+ required: true
+ description:
+ - The location where the Cloud Manager Connector will be created.
+ type: str
+
+ client_id:
+ description:
+ - The unique client ID of the Connector.
+ - The connector ID.
+ type: str
+
+ subscription_id:
+ required: true
+ description:
+ - The ID of the Azure subscription.
+ type: str
+
+ company:
+ required: true
+ description:
+ - The name of the company of the user.
+ type: str
+
+ vnet_name:
+ required: true
+ description:
+ - The name of the virtual network.
+ - for example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/default,
+ only default is needed.
+ aliases:
+ - vnet_id
+ type: str
+ version_added: '21.7.0'
+
+ vnet_resource_group:
+ description:
+ - The resource group in Azure associated with the virtual network.
+ - If not provided, it's assumed that the VNet is within the previously specified resource group.
+ type: str
+
+ network_security_resource_group:
+ description:
+ - The resource group in Azure associated with the security group.
+ - If not provided, it's assumed that the security group is within the previously specified resource group.
+ type: str
+
+ network_security_group_name:
+ required: true
+ description:
+ - The name of the security group for the deployment.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates, a list of certificate file names.
+ type: list
+ elements: str
+
+ associate_public_ip_address:
+ description:
+ - Indicates whether to associate the public IP address to the virtual machine.
+ type: bool
+ default: true
+
+ account_id:
+ required: true
+ description:
+ - The NetApp tenancy account ID.
+ type: str
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ admin_username:
+ required: true
+ description:
+ - The user name for the Connector.
+ type: str
+
+ admin_password:
+ required: true
+ description:
+ - The password for the Connector.
+ type: str
+
+ storage_account:
+ description:
+ - The storage account can be created automatically.
+ - When C(storage_account) is not set, the name is constructed by appending 'sa' to the connector C(name).
+ - Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+ type: str
+ version_added: '21.17.0'
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for Azure.
+ netapp.cloudmanager.na_cloudmanager_connector_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: bsuhas_ansible_occm
+ location: westus
+ resource_group: occm_group_westus
+ subnet_name: subnetxxxxx
+ vnet_name: Vnetxxxxx
+ subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
+ account_id: "{{ account-xxxxxxx }}"
+ company: NetApp
+ admin_password: Netapp123456
+ admin_username: bsuhas
+ network_security_group_name: OCCM_SG
+ proxy_url: abc.com
+ proxy_user_name: xyz
+ proxy_password: abcxyz
+ proxy_certificates: [abc.crt.txt, xyz.crt.txt]
+
+- name: Delete NetApp Cloud Manager connector for Azure.
+ netapp.cloudmanager.na_cloudmanager_connector_azure:
+ state: absent
+ name: ansible
+ location: westus
+ resource_group: occm_group_westus
+ network_security_group_name: OCCM_SG
+ subnet_name: subnetxxxxx
+ company: NetApp
+ admin_password: Netapp123456
+ admin_username: bsuhas
+ vnet_name: Vnetxxxxx
+ subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
+ account_id: "{{ account-xxxxxxx }}"
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ client_id: xxxxxxxxxxxxxxxxxxx
+"""
+
+RETURN = """
+msg:
+ description: Newly created Azure connector id in cloud manager.
+ type: str
+ returned: success
+ sample: 'xxxxxxxxxxxxxxxx'
+"""
+
+import traceback
+import time
+import base64
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+IMPORT_EXCEPTION = None
+
+try:
+ from azure.mgmt.resource import ResourceManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.storage import StorageManagementClient
+ from azure.mgmt.resource.resources.models import Deployment
+ from azure.common.client_factory import get_client_from_cli_profile
+ from msrestazure.azure_exceptions import CloudError
+ HAS_AZURE_LIB = True
+except ImportError as exc:
+ HAS_AZURE_LIB = False
+ IMPORT_EXCEPTION = exc
+
+
class NetAppCloudManagerConnectorAzure(object):
    ''' Create or delete a Cloud Manager connector (OCCM agent) VM in Azure. '''

    def __init__(self):
        """Build the argument spec, validate module parameters and set up REST access."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            virtual_machine_size=dict(required=False, type='str', default='Standard_DS3_v2'),
            resource_group=dict(required=True, type='str'),
            subscription_id=dict(required=True, type='str'),
            subnet_name=dict(required=True, type='str', aliases=['subnet_id']),
            vnet_name=dict(required=True, type='str', aliases=['vnet_id']),
            vnet_resource_group=dict(required=False, type='str'),
            location=dict(required=True, type='str'),
            network_security_resource_group=dict(required=False, type='str'),
            network_security_group_name=dict(required=True, type='str'),
            client_id=dict(required=False, type='str'),
            company=dict(required=True, type='str'),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            associate_public_ip_address=dict(required=False, type='bool', default=True),
            account_id=dict(required=True, type='str'),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            storage_account=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'absent', ['client_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )

        if HAS_AZURE_LIB is False:
            self.module.fail_json(msg="the python AZURE library azure.mgmt and azure.common is required. Command is pip install azure-mgmt, azure-common."
                                      " Import error: %s" % str(IMPORT_EXCEPTION))

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Default the storage account name to '<name>sa' (lower-cased) when not provided.
        if 'storage_account' not in self.parameters or self.parameters['storage_account'] == "":
            self.parameters['storage_account'] = self.parameters['name'].lower() + 'sa'
        self.rest_api = CloudManagerRestAPI(self.module)

    def _build_network_ids(self):
        """
        Return (network, subnet) as fully qualified Azure resource IDs.

        Values already given as full IDs (containing '/subscriptions') are passed
        through unchanged; otherwise IDs are composed from subscription_id and
        vnet_resource_group (falling back to resource_group).
        """
        if '/subscriptions' in self.parameters['vnet_name']:
            network = self.parameters['vnet_name']
        else:
            vnet_rg = self.parameters['resource_group'] if self.parameters.get('vnet_resource_group') is None \
                else self.parameters['vnet_resource_group']
            network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
                self.parameters['subscription_id'], vnet_rg, self.parameters['vnet_name'])

        if '/subscriptions' in self.parameters['subnet_name']:
            subnet = self.parameters['subnet_name']
        else:
            subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name'])
        return network, subnet

    def get_deploy_azure_vm(self):
        """
        Check whether the connector deployment exists in Azure.

        :return: True if the ARM deployment named after the connector exists,
                 None otherwise.
        """
        exists = False

        resource_client = get_client_from_cli_profile(ResourceManagementClient)
        try:
            exists = resource_client.deployments.check_existence(self.parameters['resource_group'], self.parameters['name'])

        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        if not exists:
            return None

        return exists

    def deploy_azure(self):
        """
        Create the Cloud Manager connector VM in Azure via an ARM deployment.

        :return: (client_id, principal_id) of the newly registered connector.
        """
        user_data, client_id = self.register_agent_to_service()
        template = json.loads(self.na_helper.call_template())
        params = json.loads(self.na_helper.call_parameters())
        params['adminUsername']['value'] = self.parameters['admin_username']
        params['adminPassword']['value'] = self.parameters['admin_password']
        params['customData']['value'] = json.dumps(user_data)
        params['location']['value'] = self.parameters['location']
        params['virtualMachineName']['value'] = self.parameters['name']
        params['storageAccount']['value'] = self.parameters['storage_account']
        if self.rest_api.environment == 'stage':
            params['environment']['value'] = self.rest_api.environment

        network, subnet = self._build_network_ids()

        nsg_rg = self.parameters['resource_group'] if self.parameters.get('network_security_resource_group') is None \
            else self.parameters['network_security_resource_group']
        network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % (
            self.parameters['subscription_id'], nsg_rg, self.parameters['network_security_group_name'])

        params['virtualNetworkId']['value'] = network
        params['networkSecurityGroupName']['value'] = network_security_group_name
        params['virtualMachineSize']['value'] = self.parameters['virtual_machine_size']
        params['subnetId']['value'] = subnet

        try:
            resource_client = get_client_from_cli_profile(ResourceManagementClient)

            resource_client.resource_groups.create_or_update(
                self.parameters['resource_group'],
                {"location": self.parameters['location']})

            deployment_properties = {
                'mode': 'Incremental',
                'template': template,
                'parameters': params
            }
            # The deployment is named after the connector; delete_azure_occm relies on this.
            resource_client.deployments.begin_create_or_update(
                self.parameters['resource_group'],
                self.parameters['name'],
                Deployment(properties=deployment_properties)
            )

        except CloudError as error:
            self.module.fail_json(msg="Error in deploy_azure: %s" % to_native(error), exception=traceback.format_exc())

        # Give the VM time to boot before polling, then wait up to ~15 more
        # minutes (30 tries x 30s) for the OCCM agent to report 'active'.
        time.sleep(120)
        retries = 30
        while retries > 0:
            occm_resp, error = self.na_helper.check_occm_status(self.rest_api, client_id)
            if error is not None:
                self.module.fail_json(
                    msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
            if occm_resp['agent']['status'] == "active":
                break
            else:
                time.sleep(30)
                retries -= 1
        if retries == 0:
            # Taking too long for status to be active
            return self.module.fail_json(msg="Taking too long for OCCM agent to be active or not properly setup")

        try:
            compute_client = get_client_from_cli_profile(ComputeManagementClient)
            vm = compute_client.virtual_machines.get(self.parameters['resource_group'], self.parameters['name'])
        except CloudError as error:
            return self.module.fail_json(msg="Error in deploy_azure (get identity): %s" % to_native(error), exception=traceback.format_exc())

        principal_id = vm.identity.principal_id
        return client_id, principal_id

    def register_agent_to_service(self):
        """
        Register the agent with Cloud Manager and collect the boot user data.

        :return: (user_data_dict, client_id)
        """
        network, subnet = self._build_network_ids()

        if self.parameters.get('account_id') is None:
            response, error = self.na_helper.get_or_create_account(self.rest_api)
            if error is not None:
                self.module.fail_json(
                    msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
            self.parameters['account_id'] = response

        headers = {
            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
        }
        body = {
            "accountId": self.parameters['account_id'],
            "name": self.parameters['name'],
            "company": self.parameters['company'],
            "placement": {
                "provider": "AZURE",
                "region": self.parameters['location'],
                "network": network,
                "subnet": subnet,
            },
            "extra": {
                "proxy": {
                    "proxyUrl": self.parameters.get('proxy_url'),
                    "proxyUserName": self.parameters.get('proxy_user_name'),
                    "proxyPassword": self.parameters.get('proxy_password')
                }
            }
        }

        register_url = "%s/agents-mgmt/connector-setup" % self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        response, error, dummy = self.rest_api.post(register_url, body, header=headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on getting userdata for connector setup: %s, %s" % (str(error), str(response)))
        client_id = response['clientId']

        proxy_certificates = []
        if self.parameters.get('proxy_certificates') is not None:
            for each in self.parameters['proxy_certificates']:
                try:
                    # Use a context manager so the handle is always closed.
                    with open(each, "r") as cert_file:
                        data = cert_file.read()
                except (OSError, IOError):
                    self.module.fail_json(msg="Error: Could not open/read file of proxy_certificates: %s" % str(each))

                # b64encode requires bytes; decode back to str so the value
                # stays JSON-serializable in the user data.
                encoded_certificate = base64.b64encode(data.encode()).decode()
                proxy_certificates.append(encoded_certificate)

        if proxy_certificates:
            # NOTE(review): assumes the setup response always carries a
            # 'proxySettings' key — confirm against the connector-setup API.
            response['proxySettings']['proxyCertificates'] = proxy_certificates

        return response, client_id

    def delete_azure_occm(self):
        """
        Delete the connector VM and all resources created with it
        (NIC, storage account, public IP, ARM deployment), then wait for the
        OCCM agent to go inactive and remove it from Cloud Manager.
        """
        # delete vm deploy
        try:
            compute_client = get_client_from_cli_profile(ComputeManagementClient)
            vm_delete = compute_client.virtual_machines.begin_delete(
                self.parameters['resource_group'],
                self.parameters['name'])
            while not vm_delete.done():
                vm_delete.wait(2)
        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # delete interfaces deploy
        try:
            network_client = get_client_from_cli_profile(NetworkManagementClient)
            interface_delete = network_client.network_interfaces.begin_delete(
                self.parameters['resource_group'],
                self.parameters['name'] + '-nic')
            while not interface_delete.done():
                interface_delete.wait(2)
        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # delete storage account deploy
        try:
            storage_client = get_client_from_cli_profile(StorageManagementClient)
            storage_client.storage_accounts.delete(
                self.parameters['resource_group'],
                self.parameters['storage_account'])
        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # delete public ip deploy
        try:
            network_client = get_client_from_cli_profile(NetworkManagementClient)
            public_ip_addresses_delete = network_client.public_ip_addresses.begin_delete(
                self.parameters['resource_group'],
                self.parameters['name'] + '-ip')
            while not public_ip_addresses_delete.done():
                public_ip_addresses_delete.wait(2)
        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # delete deployment
        try:
            resource_client = get_client_from_cli_profile(ResourceManagementClient)
            # Bug fix: the deployment is created under the connector name in
            # deploy_azure(); it was previously deleted as name + '-ip'
            # (copy-paste from the public IP delete), leaving it behind.
            deployments_delete = resource_client.deployments.begin_delete(
                self.parameters['resource_group'],
                self.parameters['name'])
            while not deployments_delete.done():
                deployments_delete.wait(5)
        except CloudError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

        # Wait up to ~160s for the agent to stop reporting 'active'.
        retries = 16
        while retries > 0:
            occm_resp, error = self.na_helper.check_occm_status(self.rest_api,
                                                                self.parameters['client_id'])
            if error is not None:
                self.module.fail_json(
                    msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
            if occm_resp['agent']['status'] != "active":
                break
            else:
                time.sleep(10)
                retries -= 1
        if retries == 0:
            # Taking too long for terminating OCCM
            return self.module.fail_json(msg="Taking too long for instance to finish terminating")
        client = self.rest_api.format_client_id(self.parameters['client_id'])
        error = self.na_helper.delete_occm_agents(self.rest_api, [{'agentId': client}])
        if error:
            self.module.fail_json(msg="Error: unexpected response on deleting OCCM: %s" % (str(error)))

    def apply(self):
        """
        Apply the requested state (present/absent) for the Azure connector.
        :return: None (exits the module with changed status and ids)
        """
        client_id = None
        principal_id = None
        if not self.module.check_mode:
            if self.parameters['state'] == 'present':
                client_id, principal_id = self.deploy_azure()
                self.na_helper.changed = True
            elif self.parameters['state'] == 'absent':
                get_deploy = self.get_deploy_azure_vm()
                if get_deploy:
                    self.delete_azure_occm()
                    self.na_helper.changed = True

        self.module.exit_json(changed=self.na_helper.changed, msg={'client_id': client_id, 'principal_id': principal_id})
+
+
def main():
    """Entry point: build the Azure connector module object and run its apply loop."""
    connector = NetAppCloudManagerConnectorAzure()
    connector.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py
new file mode 100644
index 000000000..bea686f4c
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_connector_gcp.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_connector_gcp
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_connector_gcp
+short_description: NetApp Cloud Manager connector for GCP.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create or delete Cloud Manager connector for GCP.
+
+options:
+ state:
+ description:
+ - Whether the specified Cloud Manager connector for GCP should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager connector for GCP to manage.
+ type: str
+
+ project_id:
+ description:
+ - The GCP project_id where the connector will be created.
+ required: true
+ type: str
+
+ zone:
+ description:
+ - The GCP zone where the Connector will be created.
+ required: true
+ type: str
+
+ gcp_service_account_email:
+ description:
+ - The email of the service_account for the connector instance. This service account is used to allow the Connector to create Cloud Volume ONTAP.
+ required: true
+ type: str
+ aliases: ['service_account_email']
+ version_added: 21.7.0
+
+ company:
+ description:
+ - The name of the company of the user.
+ required: true
+ type: str
+
+ gcp_service_account_path:
+ description:
+ - The local path of the service_account JSON file for GCP authorization purposes. This service account is used to create the Connector in GCP.
+ type: str
+ aliases: ['service_account_path']
+ version_added: 21.7.0
+
+ subnet_id:
+ description:
+ - The name of the subnet for the virtual machine.
+ type: str
+ default: default
+
+ network_project_id:
+ description:
+ - The project id in GCP associated with the Subnet. If not provided, it is assumed that the Subnet is within the previously specified project id.
+ type: str
+
+ machine_type:
+ description:
+ - The machine_type for the Connector VM.
+ type: str
+ default: n2-standard-4
+
+ firewall_tags:
+ description:
+ - Indicates whether to add firewall_tags to the connector VM (HTTP and HTTPS).
+ type: bool
+ default: true
+
+ associate_public_ip:
+ description:
+ - Indicates whether to associate a public IP address to the virtual machine.
+ type: bool
+ default: true
+
+ proxy_url:
+ description:
+ - The proxy URL, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_user_name:
+ description:
+ - The proxy user name, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_password:
+ description:
+ - The proxy password, if using a proxy to connect to the internet.
+ type: str
+
+ proxy_certificates:
+ description:
+ - The proxy certificates. A list of certificate file names.
+ type: list
+ elements: str
+
+ account_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with.
+ - If not provided, Cloud Manager uses the first account. If no account exists, Cloud Manager creates a new account.
+ - You can find the account ID in the account tab of Cloud Manager at [https://cloudmanager.netapp.com](https://cloudmanager.netapp.com).
+ type: str
+
+ client_id:
+ description:
+ - The client ID of the Cloud Manager Connector.
+ - The connector ID.
+ - If state is absent, the client id is used to identify the agent and delete it.
+ - If state is absent and this parameter is not set, all agents associated with C(name) are deleted.
+ - Ignored when state is present.
+ type: str
+
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager connector for GCP
+ netapp.cloudmanager.na_cloudmanager_connector_gcp:
+ state: present
+ name: ansible-occm-gcp
+ project_id: xxxxxxx-support
+ zone: us-east4-b
+ company: NetApp
+ gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
+ gcp_service_account_path: gcp_creds.json
+ proxy_user_name: test
+ proxy_password: test
+ proxy_url: http://abcdefg.com
+ proxy_certificates: ["D-TRUST_Root_Class_3_CA_2_2009.crt", "DigiCertGlobalRootCA.crt", "DigiCertGlobalRootG2.crt"]
+ account_id: account-xxxxXXXX
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+
+- name: Delete NetApp Cloud Manager connector for GCP
+ netapp.cloudmanager.na_cloudmanager_connector_gcp:
+ state: absent
+ name: ansible-occm-gcp
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ client_id: "{{ wwwwwwwwww }}"
+ project_id: xxxxxxx-support
+ zone: us-east4-b
+ company: NetApp
+ gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
+ gcp_service_account_path: gcp_creds.json
+ account_id: account-xxxxXXXX
+"""
+
+RETURN = """
+client_id:
+ description: Newly created GCP connector id on cloud manager.
+ type: str
+ returned: success
+ sample: 'FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW'
+client_ids:
+ description:
+ - a list of client ids matching the name and provider if the connector already exists.
+ - ideally the list should be empty, or contain a single element matching client_id.
+ type: list
+ elements: str
+ returned: success
+ sample: ['FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW']
+"""
+import uuid
+import time
+import base64
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+IMPORT_ERRORS = []
+HAS_GCP_COLLECTION = False
+
+try:
+ import google.auth
+ from google.auth.transport import requests
+ from google.oauth2 import service_account
+ import yaml
+ HAS_GCP_COLLECTION = True
+except ImportError as exc:
+ IMPORT_ERRORS.append(str(exc))
+
+GCP_DEPLOYMENT_MANAGER = "www.googleapis.com"
+UUID = str(uuid.uuid4())
+
+
+class NetAppCloudManagerConnectorGCP(object):
+ ''' object initialize and class methods '''
+
    def __init__(self):
        """Build the argument spec, validate parameters and acquire a GCP token."""
        # Shared cloudmanager auth options (refresh_token, sa_client_id, ...) come
        # from the collection's common argument spec.
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            project_id=dict(required=True, type='str'),
            zone=dict(required=True, type='str'),
            company=dict(required=True, type='str'),
            gcp_service_account_email=dict(required=True, type='str', aliases=['service_account_email']),
            gcp_service_account_path=dict(required=False, type='str', aliases=['service_account_path']),
            subnet_id=dict(required=False, type='str', default='default'),
            network_project_id=dict(required=False, type='str'),
            machine_type=dict(required=False, type='str', default='n2-standard-4'),
            firewall_tags=dict(required=False, type='bool', default=True),
            associate_public_ip=dict(required=False, type='bool', default=True),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            account_id=dict(required=False, type='str'),
            client_id=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        # Suffix of the deployment-manager deployment created for the connector VM.
        self.gcp_common_suffix_name = "-vm-boot-deployment"
        # Abort early when the google-auth imports failed.
        self.fail_when_import_errors(IMPORT_ERRORS, HAS_GCP_COLLECTION)
        # NOTE(review): base is object, so this super().__init__() is a no-op.
        super(NetAppCloudManagerConnectorGCP, self).__init__()

        # All deployment-manager REST calls need a GCP bearer token; fetch it once.
        self.rest_api.gcp_token, error = self.get_gcp_token()
        if error:
            self.module.fail_json(msg='Error getting gcp token: %s' % repr(error))
+
+ def get_gcp_token(self):
+ '''
+ get gcp token from gcp service account credential json file
+ '''
+ scopes = ["https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly",
+ "https://www.googleapis.com/auth/ndev.cloudman",
+ "https://www.googleapis.com/auth/ndev.cloudman.readonly",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"]
+ if 'gcp_service_account_path' in self.parameters:
+ try:
+ fh = open(self.parameters['gcp_service_account_path'])
+ except (OSError, IOError) as error:
+ return None, "opening %s: got: %s" % (self.parameters['gcp_service_account_path'], repr(error))
+ with fh:
+ key_bytes = json.load(fh)
+ if key_bytes is None:
+ return None, "Error: gcp_service_account_path file is empty"
+ credentials = service_account.Credentials.from_service_account_file(self.parameters['gcp_service_account_path'], scopes=scopes)
+ else:
+ credentials, project = google.auth.default(scopes=scopes)
+
+ credentials.refresh(requests.Request())
+
+ return credentials.token, None
+
+ def fail_when_import_errors(self, import_errors, has_gcp_collection=True):
+ if has_gcp_collection and not import_errors:
+ return
+ msg = ''
+ if not has_gcp_collection:
+ msg = 'The python google-auth package is required. '
+ msg += 'Import errors: %s' % str(import_errors)
+ self.module.fail_json(msg=msg)
+
    def get_deploy_vm(self):
        '''
        Look up the connector's deployment-manager deployment in GCP.

        :return:
            The deployment status payload if the deployment exists,
            None if it is not found.
        '''
        # The deployment is named '<name>-vm-boot-deployment'.
        # NOTE(review): api_url has no scheme; presumably the REST layer
        # prepends https:// — confirm against CloudManagerRestAPI.get.
        api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
            self.parameters['project_id'], self.parameters['name'], self.gcp_common_suffix_name)
        headers = {
            "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
            'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
        }

        occm_status, error, dummy = self.rest_api.get(api_url, header=headers)
        if error is not None:
            # A 404 whose body says 'is not found' means the deployment does not exist.
            # NOTE(review): the b'is not found' membership test assumes occm_status is
            # bytes on this path; if the REST layer returns str this raises — confirm.
            if error == '404' and b'is not found' in occm_status:
                return None
            self.module.fail_json(
                msg="Error: unexpected response on getting occm: %s, %s" % (str(error), str(occm_status)))

        return occm_status
+
+ def get_custom_data_for_gcp(self, proxy_certificates):
+ '''
+ get custom data for GCP
+ '''
+ # get account ID
+ if 'account_id' not in self.parameters:
+ # get account ID
+ response, error = self.na_helper.get_or_create_account(self.rest_api)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
+ self.parameters['account_id'] = response
+ # registerAgentTOServiceForGCP
+ response, error = self.na_helper.register_agent_to_service(self.rest_api, "GCP", "")
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: register agent to service for gcp failed: %s, %s" % (str(error), str(response)))
+ # add proxy_certificates as part of json data
+ client_id = response['clientId']
+ client_secret = response['clientSecret']
+ u_data = {
+ 'instanceName': self.parameters['name'],
+ 'company': self.parameters['company'],
+ 'clientId': client_id,
+ 'clientSecret': client_secret,
+ 'systemId': UUID,
+ 'tenancyAccountId': self.parameters['account_id'],
+ 'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
+ 'proxyUserName': self.parameters.get('proxy_user_name'),
+ 'proxyUrl': self.parameters.get('proxy_url'),
+ 'proxyCertificates': proxy_certificates,
+ },
+ }
+ # convert response to json format
+ user_data = json.dumps(u_data)
+ return user_data, client_id, None
+
def deploy_gcp_vm(self, proxy_certificates):
    '''
    deploy GCP VM

    Creates a Deployment Manager deployment with two resources -- the boot disk
    and the connector instance carrying the base64-encoded user data -- posts
    it, then polls the OCCM service until the new agent reports 'active'.

    :param proxy_certificates: certificates forwarded to get_custom_data_for_gcp().
    :return: (response, client_id, error) from the deployment POST; the module
             fails hard if the agent cannot be polled or never becomes active.
    '''
    # getCustomDataForGCP
    response, client_id, error = self.get_custom_data_for_gcp(proxy_certificates)
    if error is not None:
        self.module.fail_json(
            msg="Error: Not able to get user data for GCP: %s, %s" % (str(error), str(response)))
    # compose
    user_data = response
    gcp_custom_data = base64.b64encode(user_data.encode())
    # OAuth scopes granted to the connector VM's service account
    gcp_sa_scopes = ["https://www.googleapis.com/auth/cloud-platform",
                     "https://www.googleapis.com/auth/compute",
                     "https://www.googleapis.com/auth/compute.readonly",
                     "https://www.googleapis.com/auth/ndev.cloudman",
                     "https://www.googleapis.com/auth/ndev.cloudman.readonly"]

    tags = []
    if self.parameters['firewall_tags'] is True:
        tags = {'items': ['firewall-tag-bvsu', 'http-server', 'https-server']}

    # first resource: the connector instance itself
    device_name = self.parameters['name'] + '-vm-disk-boot'
    t = {
        'name': self.parameters['name'] + '-vm',
        'properties': {
            'disks': [
                {'autoDelete': True,
                 'boot': True,
                 'deviceName': device_name,
                 'name': device_name,
                 # escaped quotes so the $(ref...) survives the yaml -> JSON template embedding below
                 'source': "\\\"$(ref.%s.selfLink)\\\"" % device_name,
                 'type': "PERSISTENT",
                 },
            ],
            'machineType': "zones/%s/machineTypes/%s" % (self.parameters['zone'], self.parameters['machine_type']),
            'metadata': {
                'items': [
                    {'key': 'serial-port-enable',
                     'value': 1},
                    {'key': 'customData',
                     'value': gcp_custom_data}
                ]
            },
            'serviceAccounts': [{'email': self.parameters['gcp_service_account_email'],
                                 'scopes': gcp_sa_scopes, }],
            'tags': tags,
            'zone': self.parameters['zone']
        },
        # the instance must be created after its boot disk resource
        'metadata': {'dependsOn': [device_name]},
        'type': 'compute.v1.instance',
    }

    access_configs = []
    if self.parameters['associate_public_ip'] is True:
        access_configs = [{'kind': 'compute#accessConfig',
                           'name': 'External NAT',
                           'type': 'ONE_TO_ONE_NAT',
                           'networkTier': 'PREMIUM'
                           }]
    # the subnetwork may live in a shared-VPC host project (network_project_id)
    project_id = self.parameters['project_id']
    if self.parameters.get('network_project_id'):
        project_id = self.parameters['network_project_id']

    t['properties']['networkInterfaces'] = [
        {'accessConfigs': access_configs,
         'kind': 'compute#networkInterface',
         'subnetwork': 'projects/%s/regions/%s/subnetworks/%s' % (
             project_id, self.parameters['region'], self.parameters['subnet_id'])
         }]

    # second resource: the 100 GB pd-ssd boot disk the instance depends on
    td = {
        'name': device_name,
        'properties': {'name': device_name,
                       'sizeGb': 100,
                       'sourceImage': 'projects/%s/global/images/family/%s' % (self.rest_api.environment_data['GCP_IMAGE_PROJECT'],
                                                                               self.rest_api.environment_data['GCP_IMAGE_FAMILY']),
                       'type': 'zones/%s/diskTypes/pd-ssd' % (self.parameters['zone']),
                       'zone': self.parameters['zone']
                       },
        'type': 'compute.v1.disks',
    }
    content = {
        'resources': [t, td]
    }
    my_data = str(yaml.dump(content))
    # The template must be in this format:
    # {
    # "name": "ansible-cycc-vm-boot-deployment",
    # "target": {
    # "config": {
    # "content": "resources:
    # - name: xxxx
    # properties:
    # ...
    # "
    # }
    # }
    # }
    gcp_deployment_template = '{\n "name": "%s%s",\n "target": {\n "config": {\n "content": "%s"\n }\n}\n}' % (
        self.parameters['name'], '-vm-boot-deployment', my_data)

    # post the deployment
    api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments' % (
        self.parameters['project_id'])

    headers = {
        'X-User-Token': self.rest_api.token_type + " " + self.rest_api.gcp_token,
        'X-Tenancy-Account-Id': self.parameters['account_id'],
        'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
        'Content-type': "application/json",
        'Referer': "Ansible_NetApp",
        'X-Agent-Id': self.rest_api.format_client_id(client_id)
    }

    response, error, dummy = self.rest_api.post(api_url, data=gcp_deployment_template, header=headers,
                                                gcp_type=True)
    if error is not None:
        return response, client_id, error

    # check occm status
    # Sleep for 1 minute, then poll every 30s for up to 16 tries (~9 minutes total)
    time.sleep(60)
    retries = 16
    while retries > 0:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
        if error is not None:
            # the VM exists at this point, so report changed=True and surface the client_id
            self.module.fail_json(
                msg="Error: Not able to get occm status: %s, %s" % (str(error), str(agent)),
                client_id=client_id, changed=True)
        if agent['status'] == "active":
            break
        else:
            time.sleep(30)
        retries -= 1
    if retries == 0:
        # Taking too long for status to be active
        msg = "Connector VM is created and registered. Taking too long for OCCM agent to be active or not properly setup."
        msg += ' Latest status: %s' % agent
        self.module.fail_json(msg=msg, client_id=client_id, changed=True)

    return response, client_id, error
+
def create_occm_gcp(self):
    '''
    Create Cloud Manager connector for GCP
    '''
    # a proxy user name or password only makes sense together with a proxy URL
    if 'proxy_url' not in self.parameters:
        for proxy_option in ('proxy_user_name', 'proxy_password'):
            if proxy_option in self.parameters:
                self.module.fail_json(msg="Error: missing proxy_url")
    # encode each certificate file; abort on the first unreadable one
    proxy_certificates = []
    for cert_path in self.parameters.get('proxy_certificates', []):
        encoded, err = self.na_helper.encode_certificates(cert_path)
        if err is not None:
            self.module.fail_json(msg="Error: not able to read certificate file %s" % cert_path)
        proxy_certificates.append(encoded)
    # region is the super class of zone. For example, zone us-east4-b is one of the zone in region us-east4
    self.parameters['region'] = self.parameters['zone'][:-2]
    # deploy GCP VM
    vm_response, client_id, err = self.deploy_gcp_vm(proxy_certificates)
    if err is not None:
        self.module.fail_json(
            msg="Error: create_occm_gcp: %s, %s" % (str(err), str(vm_response)))
    return client_id
+
def delete_occm_gcp(self):
    '''
    Delete Cloud Manager connector for GCP

    Deletes the Deployment Manager deployment, then (when a client_id is known)
    waits for the OCCM agent to leave the active/pending states.

    :return: an error string on failure or timeout, None on success.
    '''
    api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
        self.parameters['project_id'],
        self.parameters['name'],
        self.gcp_common_suffix_name)
    headers = {
        "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
        'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
        'X-Tenancy-Account-Id': self.parameters['account_id'],
        'Content-type': "application/json",
        'Referer': "Ansible_NetApp",
    }

    response, error, dummy = self.rest_api.delete(api_url, None, header=headers)
    if error is not None:
        return "Error: unexpected response on deleting VM: %s, %s" % (str(error), str(response))
    # sleep for 30 sec
    time.sleep(30)
    if 'client_id' not in self.parameters:
        return None
    # check occm status
    retries = 30
    while retries > 0:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
        if error is not None:
            return "Error: Not able to get occm status after deleting VM: %s, %s" % (str(error), str(agent))
        # BUGFIX: was `agent['status'] != ["active", "pending"]`, which compares a string
        # to a list and is always true, so the loop never actually waited for termination.
        if agent['status'] not in ("active", "pending"):
            break
        time.sleep(10)
        # back off slowly while still 'active', drain faster otherwise
        retries -= 1 if agent['status'] == "active" else 5
    # BUGFIX: mixed -1/-5 decrements can leave retries negative, so test <= 0, not == 0
    if retries <= 0 and agent['status'] == "active":
        # Taking too long for terminating OCCM
        return "Taking too long for instance to finish terminating. Latest status: %s" % str(agent)
    return None
+
def delete_occm_agents(self, agents):
    """Remove the given OCCM agents via the helper; return an error string or None."""
    failure = self.na_helper.delete_occm_agents(self.rest_api, agents)
    return "Error: deleting OCCM agent(s): %s" % failure if failure else None
+
def get_occm_agents(self):
    """
    Fetch the OCCM agent(s) for this connector.

    On 'state: absent' with a known client_id the agent is looked up directly;
    a 403 'Action not allowed for user' is downgraded to a warning with an empty
    result, as the agent is assumed to be gone already.  Otherwise agents are
    searched by connector name.  Any remaining error fails the module.
    """
    lookup_by_id = 'client_id' in self.parameters and self.parameters['state'] == 'absent'
    if not lookup_by_id:
        agents, error = self.na_helper.get_occm_agents_by_name(
            self.rest_api, self.parameters['account_id'], self.parameters['name'], 'GCP')
    else:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
        if error == '403' and b'Action not allowed for user' in agent:
            # assume the agent does not exist anymore
            agents, error = [], None
            self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
        else:
            agents = [agent]
    if error:
        self.module.fail_json(
            msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
    return agents
+
def set_client_id(self, agents):
    """
    Derive the connector client id from the discovered agents.

    With exactly one agent id, adopt it (and record it in self.parameters).
    With several, keep the user-supplied client_id only if it matches one of
    them.  Returns (client_id, all_client_ids); client_id is '' when no
    unambiguous match exists.
    """
    client_ids = [agent['agentId'] for agent in agents if 'agentId' in agent]
    if len(client_ids) == 1:
        self.parameters['client_id'] = client_ids[0]
        return client_ids[0], client_ids
    if 'client_id' in self.parameters and self.parameters['client_id'] in client_ids:
        return self.parameters['client_id'], client_ids
    return "", client_ids
+
def apply(self):
    """
    Apply action to the Cloud Manager connector for GCP
    :return: None
    """
    client_id = ""
    agents, client_ids = [], []
    current_vm = self.get_deploy_vm()
    # a terminated deployment counts as absent
    if current_vm and current_vm['operation']['status'] == 'terminated':
        current_vm = None
    current = current_vm
    if self.parameters['state'] == 'absent' or current:
        agents = self.get_occm_agents()
        client_id, client_ids = self.set_client_id(agents)
        # agents may survive the VM; a non-empty agent list still means 'present'
        if agents and current is None:
            current = {}
        if agents:
            current['agents'] = agents

    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if self.na_helper.changed and not self.module.check_mode:
        if cd_action == 'create':
            client_id = self.create_occm_gcp()
        elif cd_action == 'delete':
            # delete both the VM deployment and any registered agents, collecting errors
            errors = []
            if current_vm:
                error = self.delete_occm_gcp()
                if error:
                    errors.append(error)
            if agents:
                error = self.delete_occm_agents(agents)
                if error:
                    errors.append(error)
            if errors:
                self.module.fail_json(msg='. '.join(errors))

    self.module.exit_json(changed=self.na_helper.changed, client_id=client_id, client_ids=client_ids)
+
+
def main():
    """
    Create Cloud Manager connector for GCP class instance and invoke apply
    :return: None
    """
    obj_store = NetAppCloudManagerConnectorGCP()
    obj_store.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py
new file mode 100644
index 000000000..3de1ebc53
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_aws.py
@@ -0,0 +1,855 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_aws
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_aws
+short_description: NetApp Cloud Manager CVO for AWS
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage Cloud Manager CVO for AWS.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for AWS should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager CVO for AWS to manage.
+ type: str
+
+ instance_type:
+ description:
+ - The instance type to use, which depends on the license type.
+ - Explore ['m5.xlarge'].
+ - Standard ['m5.2xlarge','r5.xlarge'].
+ - Premium ['m5.4xlarge','r5.2xlarge','c4.8xlarge'].
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ type: str
+ default: m5.2xlarge
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo']
+ - For single node by Node paygo ['cot-explore-paygo', 'cot-standard-paygo', 'cot-premium-paygo'].
+ - For single node by Node byol ['cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo']
+ - For HA by Node paygo ['ha-cot-explore-paygo','ha-cot-standard-paygo','ha-cot-premium-paygo'].
+ - For HA by Node byol ['ha-cot-premium-byol'].
+ choices: ['capacity-paygo', 'cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', \
+ 'ha-cot-standard-paygo', 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', \
+ 'ha-capacity-paygo']
+ default: capacity-paygo
+ type: str
+
+ provided_license:
+ description:
+ - Using a NLF license file for BYOL deployment.
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ - Essential only available with Bring Your Own License Capacity-Based.
+ - Professional available as an annual contract from AWS marketplace or Bring Your Own License Capacity-Based.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
+ - You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ subnet_id:
+ description:
+ - The subnet id where the working environment will be created. Required when single node only.
+ type: str
+
+ vpc_id:
+ description:
+ - The VPC ID where the working environment will be created.
+ - If this argument is not provided, the VPC will be calculated by using the provided subnet ID.
+ type: str
+
+ region:
+ required: true
+ description:
+ - The region where the working environment will be created.
+ type: str
+
+ data_encryption_type:
+ description:
+ - The type of encryption to use for the working environment.
+ choices: ['AWS', 'NONE']
+ default: 'AWS'
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ ebs_volume_size:
+ description:
+ - EBS volume size for the first data aggregate.
+ - For GB, the value can be [100 or 500].
+ - For TB, the value can be [1,2,4,8,16].
+ default: 1
+ type: int
+
+ ebs_volume_size_unit:
+ description:
+ - The unit for ebs volume size.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ ebs_volume_type:
+ description:
+ - The EBS volume type for the first data aggregate.
+ choices: ['gp3', 'gp2', 'io1', 'sc1', 'st1']
+ default: 'gp2'
+ type: str
+
+ security_group_id:
+ description:
+ - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
+ type: str
+
+ instance_profile_name:
+ description:
+ - The instance profile name for the working environment. If not provided, Cloud Manager creates the instance profile.
+ type: str
+
+ svm_password:
+ required: true
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ platform_serial_number:
+ description:
+ - The serial number for the cluster. This is required when using 'cot-premium-byol'.
+ type: str
+
+ tier_level:
+ description:
+ - The tiering level when 'capacity_tier' is set to 'S3'.
+ choices: ['normal', 'ia', 'ia-single', 'intelligent']
+ default: 'normal'
+ type: str
+
+ cluster_key_pair_name:
+ description:
+ - SSH authentication key pair name
+ type: str
+ version_added: 21.20.0
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account is not provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ iops:
+ description:
+ - Provisioned IOPS. Required only when provider_volume_type is 'io1' or 'gp3'.
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['S3', 'NONE']
+ default: 'S3'
+ type: str
+
+ instance_tenancy:
+ description:
+ - The EC2 instance tenancy.
+ choices: ['default', 'dedicated']
+ default: 'default'
+ type: str
+
+ cloud_provider_account:
+ description:
+ - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
+ - You can find the ID in Cloud Manager from the Settings > Credentials page.
+ - If not specified, Cloud Manager uses the instance profile of the Connector.
+ type: str
+
+ backup_volumes_to_cbs:
+ description:
+ - Automatically enable back up of all volumes to S3.
+ default: false
+ type: bool
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ enable_monitoring:
+ description:
+ - Enable the Monitoring service on the working environment.
+ default: false
+ type: bool
+
+ optimized_network_utilization:
+ description:
+ - Use optimized network utilization.
+ default: true
+ type: bool
+
+ kms_key_id:
+ description:
+ - AWS encryption parameters. It is required if using AWS encryption. Only one of KMS key id or KMS arn should be specified.
+ type: str
+
+ kms_key_arn:
+ description:
+ - AWS encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified.
+ type: str
+ version_added: 21.10.0
+
+ aws_tag:
+ description:
+ - Additional tags for the AWS CVO working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node. This is required when using 'ha-cot-premium-byol'.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node. This is required when using 'ha-cot-premium-byol'.
+ type: str
+
+ node1_subnet_id:
+ description:
+ - For HA, the subnet ID of the first node.
+ type: str
+
+ node2_subnet_id:
+ description:
+ - For HA, the subnet ID of the second node.
+ type: str
+
+ mediator_subnet_id:
+ description:
+ - For HA, the subnet ID of the mediator.
+ type: str
+
+ failover_mode:
+ description:
+ - For HA, the failover mode for the HA pair. 'PrivateIP' is for a single availability zone and 'FloatingIP' is for multiple availability zones.
+ type: str
+ choices: ['PrivateIP', 'FloatingIP']
+
+ mediator_assign_public_ip:
+ description:
+ - Boolean option to assign public IP.
+ type: bool
+ default: true
+
+ mediator_key_pair_name:
+ description:
+ - For HA, the key pair name for the mediator instance.
+ type: str
+
+ cluster_floating_ip:
+ description:
+ - For HA FloatingIP, the cluster management floating IP address.
+ type: str
+
+ data_floating_ip:
+ description:
+ - For HA FloatingIP, the data floating IP address.
+ type: str
+
+ data_floating_ip2:
+ description:
+ - For HA FloatingIP, the data floating IP address.
+ type: str
+
+ svm_floating_ip:
+ description:
+ - For HA FloatingIP, the SVM management floating IP address.
+ type: str
+
+ route_table_ids:
+ description:
+ - For HA FloatingIP, the list of route table IDs that will be updated with the floating IPs.
+ type: list
+ elements: str
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+- name: Create NetApp Cloud Manager CVO for AWS single
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ region: us-west-1
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: Create NetApp Cloud Manager CVO for AWS HA
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ region: us-west-1
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ aws_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+ is_ha: true
+ failover_mode: FloatingIP
+ node1_subnet_id: subnet-1
+ node2_subnet_id: subnet-1
+ mediator_subnet_id: subnet-1
+ mediator_key_pair_name: key1
+ cluster_floating_ip: 2.1.1.1
+ data_floating_ip: 2.1.1.2
+ data_floating_ip2: 2.1.1.3
+ svm_floating_ip: 2.1.1.4
+ route_table_ids: [rt-1,rt-2]
+
+- name: Delete NetApp Cloud Manager cvo for AWS
+ netapp.cloudmanager.na_cloudmanager_cvo_aws:
+ state: absent
+ name: ansible
+ region: us-west-1
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ subnet_id: subnet-xxxxxxx
+ vpc_id: vpc-xxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AWS CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+IMPORT_EXCEPTION = None
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_AWS_LIB = True
+except ImportError as exc:
+ HAS_AWS_LIB = False
+ IMPORT_EXCEPTION = exc
+
+AWS_License_Types = ['cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', 'ha-cot-standard-paygo',
+ 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', 'capacity-paygo', 'ha-capacity-paygo']
+
+
+class NetAppCloudManagerCVOAWS:
+ ''' object initialize and class methods '''
+
def __init__(self):
    """Build the argument spec, validate cross-parameter rules, and initialize REST access."""
    self.use_rest = False
    self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
    self.argument_spec.update(dict(
        name=dict(required=True, type='str'),
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        instance_type=dict(required=False, type='str', default='m5.2xlarge'),
        license_type=dict(required=False, type='str', choices=AWS_License_Types, default='capacity-paygo'),
        workspace_id=dict(required=False, type='str'),
        subnet_id=dict(required=False, type='str'),
        vpc_id=dict(required=False, type='str'),
        region=dict(required=True, type='str'),
        data_encryption_type=dict(required=False, type='str', choices=['AWS', 'NONE'], default='AWS'),
        # BUGFIX: default was the string '1' for an int option; use a real int
        ebs_volume_size=dict(required=False, type='int', default=1),
        ebs_volume_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
        ebs_volume_type=dict(required=False, type='str', choices=['gp3', 'gp2', 'io1', 'sc1', 'st1'], default='gp2'),
        svm_password=dict(required=True, type='str', no_log=True),
        svm_name=dict(required=False, type='str'),
        ontap_version=dict(required=False, type='str', default='latest'),
        use_latest_version=dict(required=False, type='bool', default=True),
        platform_serial_number=dict(required=False, type='str'),
        capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
        provided_license=dict(required=False, type='str'),
        tier_level=dict(required=False, type='str', choices=['normal', 'ia', 'ia-single', 'intelligent'], default='normal'),
        cluster_key_pair_name=dict(required=False, type='str'),
        nss_account=dict(required=False, type='str'),
        writing_speed_state=dict(required=False, type='str'),
        iops=dict(required=False, type='int'),
        throughput=dict(required=False, type='int'),
        capacity_tier=dict(required=False, type='str', choices=['S3', 'NONE'], default='S3'),
        instance_tenancy=dict(required=False, type='str', choices=['default', 'dedicated'], default='default'),
        instance_profile_name=dict(required=False, type='str'),
        security_group_id=dict(required=False, type='str'),
        cloud_provider_account=dict(required=False, type='str'),
        backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
        enable_compliance=dict(required=False, type='bool', default=False),
        enable_monitoring=dict(required=False, type='bool', default=False),
        optimized_network_utilization=dict(required=False, type='bool', default=True),
        kms_key_id=dict(required=False, type='str', no_log=True),
        kms_key_arn=dict(required=False, type='str', no_log=True),
        client_id=dict(required=True, type='str'),
        aws_tag=dict(required=False, type='list', elements='dict', options=dict(
            tag_key=dict(type='str', no_log=False),
            tag_value=dict(type='str')
        )),
        is_ha=dict(required=False, type='bool', default=False),
        platform_serial_number_node1=dict(required=False, type='str'),
        platform_serial_number_node2=dict(required=False, type='str'),
        failover_mode=dict(required=False, type='str', choices=['PrivateIP', 'FloatingIP']),
        mediator_assign_public_ip=dict(required=False, type='bool', default=True),
        node1_subnet_id=dict(required=False, type='str'),
        node2_subnet_id=dict(required=False, type='str'),
        mediator_subnet_id=dict(required=False, type='str'),
        mediator_key_pair_name=dict(required=False, type='str'),
        cluster_floating_ip=dict(required=False, type='str'),
        data_floating_ip=dict(required=False, type='str'),
        data_floating_ip2=dict(required=False, type='str'),
        svm_floating_ip=dict(required=False, type='str'),
        route_table_ids=dict(required=False, type='list', elements='str'),
        upgrade_ontap_version=dict(required=False, type='bool', default=False),
        update_svm_password=dict(required=False, type='bool', default=False),
    ))

    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        required_if=[
            ['ebs_volume_type', 'gp3', ['iops', 'throughput']],
            ['ebs_volume_type', 'io1', ['iops']],
            ['license_type', 'cot-premium-byol', ['platform_serial_number']],
            ['license_type', 'ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
            ['license_type', 'capacity-paygo', ['capacity_package_name']],
            ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
        ],
        required_one_of=[['refresh_token', 'sa_client_id']],
        mutually_exclusive=[['kms_key_id', 'kms_key_arn']],
        required_together=[['sa_client_id', 'sa_secret_key']],
        supports_check_mode=True,
    )

    if HAS_AWS_LIB is False:
        # BUGFIX: added the missing separator so the message no longer renders as "boto3.Import error"
        self.module.fail_json(msg="the python AWS library boto3 and botocore is required. Command is pip install boto3. "
                                  "Import error: %s" % str(IMPORT_EXCEPTION))

    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)
    # options that may be modified on an existing CVO without recreating it
    self.changeable_params = ['aws_tag', 'svm_password', 'svm_name', 'tier_level', 'ontap_version',
                              'instance_type', 'license_type', 'writing_speed_state']
    self.rest_api = CloudManagerRestAPI(self.module)
    self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
    # HA pairs use a different API root ('aws/ha') than single-node ('vsa') systems
    self.rest_api.api_root_path = '/occm/api/%s' % ('aws/ha' if self.parameters['is_ha'] else 'vsa')
    self.headers = {
        'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
    }
+
def get_vpc(self):
    """
    Get vpc

    Resolves the VPC owning self.parameters['subnet_id'] via boto3 EC2
    describe_subnets in the configured region; relies on the ambient AWS
    credential chain.  Fails the module on any AWS client error.
    :return: vpc ID
    """
    vpc_result = None
    ec2 = boto3.client('ec2', region_name=self.parameters['region'])

    vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}

    try:
        vpc_result = ec2.describe_subnets(**vpc_input)
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())

    return vpc_result['Subnets'][0]['VpcId']
+
+ def create_cvo_aws(self):
+ """ Create AWS CVO """
+ if self.parameters.get('workspace_id') is None:
+ response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['workspace_id'] = response
+
+ if self.parameters.get('vpc_id') is None and self.parameters['is_ha'] is False:
+ self.parameters['vpc_id'] = self.get_vpc()
+
+ if self.parameters.get('nss_account') is None:
+ if self.parameters.get('platform_serial_number') is not None:
+ if not self.parameters['platform_serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'cot-premium-byol':
+ response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['nss_account'] = response
+ elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
+ if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
+ and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
+ and self.parameters['license_type'] == 'ha-cot-premium-byol':
+ response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['nss_account'] = response
+
+ json = {"name": self.parameters['name'],
+ "region": self.parameters['region'],
+ "tenantId": self.parameters['workspace_id'],
+ "vpcId": self.parameters['vpc_id'],
+ "dataEncryptionType": self.parameters['data_encryption_type'],
+ "ebsVolumeSize": {
+ "size": self.parameters['ebs_volume_size'],
+ "unit": self.parameters['ebs_volume_size_unit']},
+ "ebsVolumeType": self.parameters['ebs_volume_type'],
+ "svmPassword": self.parameters['svm_password'],
+ "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
+ "enableCompliance": self.parameters['enable_compliance'],
+ "enableMonitoring": self.parameters['enable_monitoring'],
+ "optimizedNetworkUtilization": self.parameters['optimized_network_utilization'],
+ "vsaMetadata": {
+ "ontapVersion": self.parameters['ontap_version'],
+ "licenseType": self.parameters['license_type'],
+ "useLatestVersion": self.parameters['use_latest_version'],
+ "instanceType": self.parameters['instance_type']},
+ }
+
+ if self.parameters['capacity_tier'] == "S3":
+ json.update({"capacityTier": self.parameters['capacity_tier'],
+ "tierLevel": self.parameters['tier_level']})
+
+ # clean default value if it is not by Capacity license
+ if not self.parameters['license_type'].endswith('capacity-paygo'):
+ json['vsaMetadata'].update({"capacityPackageName": ''})
+
+ if self.parameters.get('platform_serial_number') is not None:
+ json['vsaMetadata'].update({"platformSerialNumber": self.parameters['platform_serial_number']})
+
+ if self.parameters.get('provided_license') is not None:
+ json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
+
+ if self.parameters.get('capacity_package_name') is not None:
+ json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
+
+ if self.parameters.get('writing_speed_state') is not None:
+ json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})
+
+ if self.parameters.get('iops') is not None:
+ json.update({"iops": self.parameters['iops']})
+
+ if self.parameters.get('throughput') is not None:
+ json.update({"throughput": self.parameters['throughput']})
+
+ if self.parameters.get('cluster_key_pair_name') is not None:
+ json.update({"clusterKeyPairName": self.parameters['cluster_key_pair_name']})
+
+ if self.parameters.get('instance_tenancy') is not None:
+ json.update({"instanceTenancy": self.parameters['instance_tenancy']})
+
+ if self.parameters.get('instance_profile_name') is not None:
+ json.update({"instanceProfileName": self.parameters['instance_profile_name']})
+
+ if self.parameters.get('security_group_id') is not None:
+ json.update({"securityGroupId": self.parameters['security_group_id']})
+
+ if self.parameters.get('cloud_provider_account') is not None:
+ json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})
+
+ if self.parameters.get('backup_volumes_to_cbs') is not None:
+ json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})
+
+ if self.parameters.get('svm_name') is not None:
+ json.update({"svmName": self.parameters['svm_name']})
+
+ if self.parameters['data_encryption_type'] == "AWS":
+ if self.parameters.get('kms_key_id') is not None:
+ json.update({"awsEncryptionParameters": {"kmsKeyId": self.parameters['kms_key_id']}})
+ if self.parameters.get('kms_key_arn') is not None:
+ json.update({"awsEncryptionParameters": {"kmsKeyArn": self.parameters['kms_key_arn']}})
+
+ if self.parameters.get('aws_tag') is not None:
+ tags = []
+ for each_tag in self.parameters['aws_tag']:
+ tag = {
+ 'tagKey': each_tag['tag_key'],
+ 'tagValue': each_tag['tag_value']
+ }
+
+ tags.append(tag)
+ json.update({"awsTags": tags})
+
+ if self.parameters['is_ha'] is True:
+ ha_params = dict({
+ "mediatorAssignPublicIP": self.parameters['mediator_assign_public_ip']
+ })
+
+ if self.parameters.get('failover_mode'):
+ ha_params["failoverMode"] = self.parameters['failover_mode']
+
+ if self.parameters.get('node1_subnet_id'):
+ ha_params["node1SubnetId"] = self.parameters['node1_subnet_id']
+
+ if self.parameters.get('node2_subnet_id'):
+ ha_params["node2SubnetId"] = self.parameters['node2_subnet_id']
+
+ if self.parameters.get('mediator_subnet_id'):
+ ha_params["mediatorSubnetId"] = self.parameters['mediator_subnet_id']
+
+ if self.parameters.get('mediator_key_pair_name'):
+ ha_params["mediatorKeyPairName"] = self.parameters['mediator_key_pair_name']
+
+ if self.parameters.get('cluster_floating_ip'):
+ ha_params["clusterFloatingIP"] = self.parameters['cluster_floating_ip']
+
+ if self.parameters.get('data_floating_ip'):
+ ha_params["dataFloatingIP"] = self.parameters['data_floating_ip']
+
+ if self.parameters.get('data_floating_ip2'):
+ ha_params["dataFloatingIP2"] = self.parameters['data_floating_ip2']
+
+ if self.parameters.get('svm_floating_ip'):
+ ha_params["svmFloatingIP"] = self.parameters['svm_floating_ip']
+
+ if self.parameters.get('route_table_ids'):
+ ha_params["routeTableIds"] = self.parameters['route_table_ids']
+
+ if self.parameters.get('platform_serial_number_node1'):
+ ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
+
+ if self.parameters.get('platform_serial_number_node2'):
+ ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
+
+ json["haParams"] = ha_params
+
+ else:
+ json["subnetId"] = self.parameters['subnet_id']
+
+ api_url = '%s/working-environments' % self.rest_api.api_root_path
+ response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: unexpected response on creating cvo aws: %s, %s" % (str(error), str(response)))
+ working_environment_id = response['publicId']
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AWS: %s" % str(err))
+
+ return working_environment_id
+
+ def update_cvo_aws(self, working_environment_id, modify):
+ base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+ for item in modify:
+ if item == 'svm_password':
+ response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'svm_name':
+ response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'aws_tag':
+ tag_list = None
+ if 'aws_tag' in self.parameters:
+ tag_list = self.parameters['aws_tag']
+ response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'aws_tag', tag_list)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'tier_level':
+ response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'writing_speed_state':
+ response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'ontap_version':
+ response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'instance_type' or item == 'license_type':
+ response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+ self.parameters['instance_type'],
+ self.parameters['license_type'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def delete_cvo_aws(self, we_id):
+ """
+ Delete AWS CVO
+ """
+ api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+ response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on deleting cvo aws: %s, %s" % (str(error), str(response)))
+
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AWS: %s" % str(err))
+
+ def validate_cvo_params(self):
+ if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
+ self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
+
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == "ha-cot-premium-byol":
+ if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
+ self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required"
+ "when having ha type as true and license_type as ha-cot-premium-byol")
+
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+ self.parameters['license_type'] = 'ha-capacity-paygo'
+
+ def apply(self):
+ """
+ Apply action to the Cloud Manager CVO for AWS
+ :return: None
+ """
+ working_environment_id = None
+ modify = None
+ current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
+ self.parameters['name'], "aws")
+ if current:
+ self.parameters['working_environment_id'] = current['publicId']
+ # check the action
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+
+ if current and self.parameters['state'] != 'absent':
+ # Check mandatory parameters
+ self.validate_cvo_params()
+ working_environment_id = current['publicId']
+ modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'aws')
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == "create":
+ self.validate_cvo_params()
+ working_environment_id = self.create_cvo_aws()
+ elif cd_action == "delete":
+ self.delete_cvo_aws(current['publicId'])
+ else:
+ self.update_cvo_aws(current['publicId'], modify)
+
+ self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """Instantiate the Cloud Manager CVO for AWS module object and run it."""
    cvo = NetAppCloudManagerCVOAWS()
    cvo.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py
new file mode 100644
index 000000000..3212323e0
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_azure.py
@@ -0,0 +1,746 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_azure
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_azure
+short_description: NetApp Cloud Manager CVO/working environment in single or HA mode for Azure.
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, delete, or manage Cloud Manager CVO/working environment in single or HA mode for Azure.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for AZURE should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ required: true
+ description:
+ - The name of the Cloud Manager CVO for AZURE to manage.
+ type: str
+
+ subscription_id:
+ required: true
+ description:
+ - The ID of the Azure subscription.
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance to use, which depends on the license type you chose.
+ - Explore ['Standard_DS3_v2'].
+ - Standard ['Standard_DS4_v2, Standard_DS13_v2, Standard_L8s_v2'].
+ - Premium ['Standard_DS5_v2', 'Standard_DS14_v2'].
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ type: str
+ default: Standard_DS4_v2
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo'].
+ - For single node by Node paygo ['azure-cot-explore-paygo', 'azure-cot-standard-paygo', 'azure-cot-premium-paygo'].
+ - For single node by Node byol ['azure-cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo'].
+ - For HA by Node paygo ['azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo'].
+ - For HA by Node byol ['azure-ha-cot-premium-byol'].
+ choices: ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', \
+ 'azure-cot-explore-paygo', 'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', \
+ 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
+ default: 'capacity-paygo'
+ type: str
+
+ provided_license:
+ description:
+ - Using a NLF license file for BYOL deployment.
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ - Essential only available with Bring Your Own License Capacity-Based.
+ - Professional available as an annual contract from a cloud provider or Bring Your Own License Capacity-Based.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
+ - You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com].
+ type: str
+
+ subnet_id:
+ required: true
+ description:
+ - The name of the subnet for the Cloud Volumes ONTAP system.
+ type: str
+
+ vnet_id:
+ required: true
+ description:
+ - The name of the virtual network.
+ type: str
+
+ vnet_resource_group:
+ description:
+ - The resource group in Azure associated to the virtual network.
+ type: str
+
+ resource_group:
+ description:
+ - The resource_group where Cloud Volumes ONTAP will be created.
+ - If not provided, Cloud Manager generates the resource group name (name of the working environment/CVO with suffix '-rg').
+ - If the resource group does not exist, it is created.
+ type: str
+
+ allow_deploy_in_existing_rg:
+ description:
+ - Indicates if to allow creation in existing resource group.
+ type: bool
+ default: false
+
+ cidr:
+ required: true
+ description:
+ - The CIDR of the VNET. If not provided, resource needs az login to authorize and fetch the cidr details from Azure.
+ type: str
+
+ location:
+ required: true
+ description:
+ - The location where the working environment will be created.
+ type: str
+
+ data_encryption_type:
+ description:
+ - The type of encryption to use for the working environment.
+ choices: ['AZURE', 'NONE']
+ default: 'AZURE'
+ type: str
+
+ azure_encryption_parameters:
+ description:
+ - AZURE encryption parameters. It is required if using AZURE encryption.
+ type: str
+ version_added: 21.10.0
+
+ storage_type:
+ description:
+ - The type of storage for the first data aggregate.
+ choices: ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS']
+ default: 'Premium_LRS'
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on [https://cloudmanager.netapp.com].
+ type: str
+
+ disk_size:
+ description:
+ - Azure volume size for the first data aggregate.
+ - For GB, the value can be [100, 500].
+ - For TB, the value can be [1,2,4,8,16].
+ default: 1
+ type: int
+
+ disk_size_unit:
+ description:
+ - The unit for disk size.
+ choices: ['GB', 'TB']
+ default: 'TB'
+ type: str
+
+ security_group_id:
+ description:
+ - The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
+ type: str
+
+ svm_password:
+ required: true
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ serial_number:
+ description:
+ - The serial number for the cluster.
+ - Required when using one of these, 'azure-cot-premium-byol' or 'azure-ha-cot-premium-byol'.
+ type: str
+
+ tier_level:
+ description:
+ - If capacity_tier is Blob, this argument indicates the tiering level.
+ choices: ['normal', 'cool']
+ default: 'normal'
+ type: str
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['Blob', 'NONE']
+ default: 'Blob'
+ type: str
+
+ cloud_provider_account:
+ description:
+ - The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
+ - You can find the ID in Cloud Manager from the Settings > Credentials page.
+ - If not specified, Cloud Manager uses the instance profile of the Connector.
+ type: str
+
+ backup_volumes_to_cbs:
+ description:
+      - Automatically enable back up of all volumes to Azure Blob storage.
+ default: false
+ type: bool
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ enable_monitoring:
+ description:
+ - Enable the Monitoring service on the working environment.
+ default: false
+ type: bool
+
+ azure_tag:
+ description:
+ - Additional tags for the AZURE CVO working environment.
+ type: list
+ elements: dict
+ suboptions:
+ tag_key:
+ description: The key of the tag.
+ type: str
+ tag_value:
+ description: The tag value.
+ type: str
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node.
+ type: str
+
+ ha_enable_https:
+ description:
+ - For HA, enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false.
+ type: bool
+ version_added: 21.10.0
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ availability_zone:
+ description:
+ - The availability zone on the location configuration.
+ type: int
+ version_added: 21.20.0
+
+ availability_zone_node1:
+ description:
+ - The node1 availability zone on the location configuration for HA.
+ type: int
+ version_added: 21.21.0
+
+ availability_zone_node2:
+ description:
+ - The node2 availability zone on the location configuration for HA.
+ type: int
+ version_added: 21.21.0
+'''
+
+EXAMPLES = """
+- name: create NetApp Cloud Manager CVO for Azure single
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ azure_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+
+- name: create NetApp Cloud Manager CVO for Azure HA
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: present
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ name: AnsibleCVO
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ writing_speed_state: NORMAL
+ azure_tag: [
+ {tag_key: abc,
+ tag_value: a123}]
+ is_ha: true
+
+- name: delete NetApp Cloud Manager cvo for Azure
+ netapp.cloudmanager.na_cloudmanager_cvo_azure:
+ state: absent
+ name: ansible
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ location: westus
+ subnet_id: subnet-xxxxxxx
+ vnet_id: vnetxxxxxxxx
+ svm_password: P@assword!
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created AZURE CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
# Every license type accepted by the Azure CVO API: single-node and HA variants,
# node-based (paygo/byol) and capacity-based ('capacity-paygo'/'ha-capacity-paygo').
AZURE_License_Types = ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', 'azure-cot-explore-paygo',
                       'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
+
+
+class NetAppCloudManagerCVOAZURE:
+ """ object initialize and class methods """
+
    def __init__(self):
        """Set up the module: argument spec, AnsibleModule, helper, and REST client.

        The REST root path switches between the 'ha' and 'vsa' (single node)
        API families based on the is_ha parameter.
        """
        self.use_rest = False
        # Start from the shared cloudmanager auth/connection options and add module-specific ones.
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            instance_type=dict(required=False, type='str', default='Standard_DS4_v2'),
            license_type=dict(required=False, type='str', choices=AZURE_License_Types, default='capacity-paygo'),
            workspace_id=dict(required=False, type='str'),
            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
            provided_license=dict(required=False, type='str'),
            subnet_id=dict(required=True, type='str'),
            vnet_id=dict(required=True, type='str'),
            vnet_resource_group=dict(required=False, type='str'),
            resource_group=dict(required=False, type='str'),
            cidr=dict(required=True, type='str'),
            location=dict(required=True, type='str'),
            subscription_id=dict(required=True, type='str'),
            data_encryption_type=dict(required=False, type='str', choices=['AZURE', 'NONE'], default='AZURE'),
            azure_encryption_parameters=dict(required=False, type='str', no_log=True),
            storage_type=dict(required=False, type='str', choices=['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS'], default='Premium_LRS'),
            disk_size=dict(required=False, type='int', default=1),
            disk_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
            svm_password=dict(required=True, type='str', no_log=True),
            svm_name=dict(required=False, type='str'),
            ontap_version=dict(required=False, type='str', default='latest'),
            use_latest_version=dict(required=False, type='bool', default=True),
            tier_level=dict(required=False, type='str', choices=['normal', 'cool'], default='normal'),
            nss_account=dict(required=False, type='str'),
            writing_speed_state=dict(required=False, type='str'),
            capacity_tier=dict(required=False, type='str', choices=['Blob', 'NONE'], default='Blob'),
            security_group_id=dict(required=False, type='str'),
            cloud_provider_account=dict(required=False, type='str'),
            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
            enable_compliance=dict(required=False, type='bool', default=False),
            enable_monitoring=dict(required=False, type='bool', default=False),
            allow_deploy_in_existing_rg=dict(required=False, type='bool', default=False),
            client_id=dict(required=True, type='str'),
            azure_tag=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            serial_number=dict(required=False, type='str'),
            is_ha=dict(required=False, type='bool', default=False),
            platform_serial_number_node1=dict(required=False, type='str'),
            platform_serial_number_node2=dict(required=False, type='str'),
            ha_enable_https=dict(required=False, type='bool'),
            upgrade_ontap_version=dict(required=False, type='bool', default=False),
            update_svm_password=dict(required=False, type='bool', default=False),
            availability_zone=dict(required=False, type='int'),
            availability_zone_node1=dict(required=False, type='int'),
            availability_zone_node2=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # Capacity licenses need a package name; BYOL licenses need serial numbers.
            required_if=[
                ['license_type', 'capacity-paygo', ['capacity_package_name']],
                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
                ['license_type', 'azure-cot-premium-byol', ['serial_number']],
                ['license_type', 'azure-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
            ],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Attributes that can be modified on an existing working environment.
        self.changeable_params = ['svm_password', 'svm_name', 'azure_tag', 'tier_level', 'ontap_version',
                                  'instance_type', 'license_type', 'writing_speed_state']
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # HA pairs and single-node (vsa) systems live under different API roots.
        self.rest_api.api_root_path = '/occm/api/azure/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
+
    def create_cvo_azure(self):
        """
        Create an AZURE CVO working environment.

        Builds the POST payload from module parameters, submits it to the
        working-environments endpoint, waits for the asynchronous create
        task to finish, and returns the new working environment's publicId.
        """
        # Default the workspace to the tenant's first workspace when not given.
        if self.parameters.get('workspace_id') is None:
            response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
            if response is None:
                self.module.fail_json(msg)
            self.parameters['workspace_id'] = response

        # For non-Eval BYOL deployments, look up an NSS account if none was supplied.
        if self.parameters.get('nss_account') is None:
            if self.parameters.get('serial_number') is not None:
                if not self.parameters['serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'azure-cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response
            elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
                if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
                        and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
                        and self.parameters['license_type'] == 'azure-ha-cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response

        # Base request body; note 'json' here is a local dict, not the json module.
        json = {"name": self.parameters['name'],
                "region": self.parameters['location'],
                "subscriptionId": self.parameters['subscription_id'],
                "tenantId": self.parameters['workspace_id'],
                "storageType": self.parameters['storage_type'],
                "dataEncryptionType": self.parameters['data_encryption_type'],
                "optimizedNetworkUtilization": True,
                "diskSize": {
                    "size": self.parameters['disk_size'],
                    "unit": self.parameters['disk_size_unit']},
                "svmPassword": self.parameters['svm_password'],
                "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
                "enableCompliance": self.parameters['enable_compliance'],
                "enableMonitoring": self.parameters['enable_monitoring'],
                "vsaMetadata": {
                    "ontapVersion": self.parameters['ontap_version'],
                    "licenseType": self.parameters['license_type'],
                    "useLatestVersion": self.parameters['use_latest_version'],
                    "instanceType": self.parameters['instance_type']}
                }

        # Data tiering to Blob requires a tier level.
        if self.parameters['capacity_tier'] == "Blob":
            json.update({"capacityTier": self.parameters['capacity_tier'],
                         "tierLevel": self.parameters['tier_level']})

        if self.parameters.get('provided_license') is not None:
            json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})

        # clean default value if it is not by Capacity license
        if not self.parameters['license_type'].endswith('capacity-paygo'):
            json['vsaMetadata'].update({"capacityPackageName": ''})

        if self.parameters.get('capacity_package_name') is not None:
            json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})

        # Optional top-level attributes: only sent when the user provided them.
        if self.parameters.get('cidr') is not None:
            json.update({"cidr": self.parameters['cidr']})

        if self.parameters.get('writing_speed_state') is not None:
            json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})

        # Without an explicit resource group, Cloud Manager uses '<name>-rg'.
        if self.parameters.get('resource_group') is not None:
            json.update({"resourceGroup": self.parameters['resource_group'],
                         "allowDeployInExistingRg": self.parameters['allow_deploy_in_existing_rg']})
        else:
            json.update({"resourceGroup": (self.parameters['name'] + '-rg')})

        if self.parameters.get('serial_number') is not None:
            json.update({"serialNumber": self.parameters['serial_number']})

        if self.parameters.get('security_group_id') is not None:
            json.update({"securityGroupId": self.parameters['security_group_id']})

        if self.parameters.get('cloud_provider_account') is not None:
            json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})

        if self.parameters.get('backup_volumes_to_cbs') is not None:
            json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})

        if self.parameters.get('nss_account') is not None:
            json.update({"nssAccount": self.parameters['nss_account']})

        if self.parameters.get('availability_zone') is not None:
            json.update({"availabilityZone": self.parameters['availability_zone']})

        if self.parameters['data_encryption_type'] == "AZURE":
            if self.parameters.get('azure_encryption_parameters') is not None:
                json.update({"azureEncryptionParameters": {"key": self.parameters['azure_encryption_parameters']}})

        if self.parameters.get('svm_name') is not None:
            json.update({"svmName": self.parameters['svm_name']})

        # Translate the module's tag_key/tag_value pairs into the API's tagKey/tagValue form.
        if self.parameters.get('azure_tag') is not None:
            tags = []
            for each_tag in self.parameters['azure_tag']:
                tag = {
                    'tagKey': each_tag['tag_key'],
                    'tagValue': each_tag['tag_value']
                }

                tags.append(tag)
            json.update({"azureTags": tags})

        # HA-pair-only settings are nested under haParams.
        if self.parameters['is_ha']:
            ha_params = dict()

            if self.parameters.get('platform_serial_number_node1'):
                ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']

            if self.parameters.get('platform_serial_number_node2'):
                ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']

            if self.parameters.get('availability_zone_node1'):
                ha_params["availabilityZoneNode1"] = self.parameters['availability_zone_node1']

            if self.parameters.get('availability_zone_node2'):
                ha_params["availabilityZoneNode2"] = self.parameters['availability_zone_node2']

            if self.parameters.get('ha_enable_https') is not None:
                ha_params['enableHttps'] = self.parameters['ha_enable_https']

            json["haParams"] = ha_params

        # The VNET may live in a different resource group than the CVO itself.
        resource_group = self.parameters['vnet_resource_group'] if self.parameters.get(
            'vnet_resource_group') is not None else self.parameters['resource_group']

        # Expand vnet/subnet names into full Azure resource IDs (simulator uses a short form).
        resource_group_path = 'subscriptions/%s/resourceGroups/%s' % (self.parameters['subscription_id'], resource_group)
        vnet_format = '%s/%s' if self.rest_api.simulator else '/%s/providers/Microsoft.Network/virtualNetworks/%s'
        vnet = vnet_format % (resource_group_path, self.parameters['vnet_id'])
        json.update({"vnetId": vnet})
        json.update({"subnetId": '%s/subnets/%s' % (vnet, self.parameters['subnet_id'])})

        api_url = '%s/working-environments' % self.rest_api.api_root_path
        response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating cvo azure: %s, %s" % (str(error), str(response)))
        working_environment_id = response['publicId']
        # Poll the asynchronous create task (60 retries, 60 s apart).
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)

        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AZURE: %s" % str(err))

        return working_environment_id
+
+ def get_extra_azure_tags(self, rest_api, headers):
+ # Get extra azure tag from current working environment
+ # It is created automatically not from the user input
+ we, err = self.na_helper.get_working_environment_details(rest_api, headers)
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response to get CVO AZURE details: %s" % str(err))
+ return [{'tag_key': 'DeployedByOccm', 'tag_value': we['userTags']['DeployedByOccm']}] if 'DeployedByOccm' in \
+ we['userTags'] else []
+
+ def update_cvo_azure(self, working_environment_id, modify):
+ base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+ for item in modify:
+ if item == 'svm_password':
+ response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'svm_name':
+ response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'azure_tag':
+ # default azure tag
+ tag_list = self.get_extra_azure_tags(self.rest_api, self.headers)
+ if 'azure_tag' in self.parameters:
+ tag_list.extend(self.parameters['azure_tag'])
+ response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'azure_tag', tag_list)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'tier_level':
+ response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'writing_speed_state':
+ response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'ontap_version':
+ response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'instance_type' or item == 'license_type':
+ response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+ self.parameters['instance_type'],
+ self.parameters['license_type'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def delete_cvo_azure(self, we_id):
+ """
+ Delete AZURE CVO
+ """
+
+ api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+ response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on deleting cvo azure: %s, %s" % (str(error), str(response)))
+
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AZURE: %s" % str(err))
+
+ def validate_cvo_params(self):
+ if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
+ self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
+
+ if self.parameters.get('serial_number') is None and self.parameters['license_type'] == "azure-cot-premium-byol":
+ self.module.fail_json(msg="serial_number parameter required when having license_type as azure-cot-premium-byol")
+
+ if self.parameters['is_ha'] and self.parameters['license_type'] == "azure-ha-cot-premium-byol":
+ if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
+ self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required"
+ "when having ha type as true and license_type as azure-ha-cot-premium-byol")
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+ self.parameters['license_type'] == 'ha-capacity-paygo'
+
    def apply(self):
        """
        Apply action to the Cloud Manager CVO for AZURE

        Looks up the working environment by name, decides between create /
        delete / update, performs the action unless running in check mode,
        and exits with the (possibly new) working_environment_id.
        :return: None
        """
        working_environment_id = None
        modify = None
        # existence check by name; 'current' is None when the CVO does not exist yet
        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
                                                                                self.parameters['name'], "azure")
        if current:
            self.parameters['working_environment_id'] = current['publicId']
        # check the action whether to create, delete, or not
        # (also sets na_helper.changed as a side effect)
        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        if current and self.parameters['state'] != 'absent':
            # CVO exists and should keep existing: compute the attribute diff
            working_environment_id = current['publicId']
            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'azure')
            if error is not None:
                self.module.fail_json(changed=False, msg=error)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                self.validate_cvo_params()
                working_environment_id = self.create_cvo_azure()
            elif cd_action == "delete":
                self.delete_cvo_azure(current['publicId'])
            else:
                # no create/delete needed, so apply the modified attributes
                self.update_cvo_azure(current['publicId'], modify)

        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """Instantiate the Azure CVO module object and run its apply logic."""
    cvo_azure = NetAppCloudManagerCVOAZURE()
    cvo_azure.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py
new file mode 100644
index 000000000..7abbca823
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_cvo_gcp.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_cvo_gcp
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_cvo_gcp
+short_description: NetApp Cloud Manager CVO for GCP
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - Create, delete, or manage Cloud Manager CVO for GCP.
+
+options:
+
+ backup_volumes_to_cbs:
+ description:
+ - Automatically backup all volumes to cloud.
+ default: false
+ type: bool
+
+ capacity_tier:
+ description:
+ - Whether to enable data tiering for the first data aggregate.
+ choices: ['cloudStorage']
+ type: str
+
+ client_id:
+ required: true
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ - You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ data_encryption_type:
+ description:
+ - Type of encryption to use for this working environment.
+ choices: ['GCP']
+ type: str
+
+ gcp_encryption_parameters:
+ description:
+ - The GCP encryption parameters.
+ type: str
+ version_added: 21.10.0
+
+ enable_compliance:
+ description:
+ - Enable the Cloud Compliance service on the working environment.
+ default: false
+ type: bool
+
+ firewall_rule:
+ description:
+ - Firewall name for a single node cluster.
+ type: str
+
+ gcp_labels:
+ description:
      - Optionally provide up to four key-value pairs with which to tag all GCP entities created by Cloud Manager.
+ type: list
+ elements: dict
+ suboptions:
+ label_key:
+ description: The key of the label.
+ type: str
+ label_value:
+ description: The label value.
+ type: str
+
+ gcp_service_account:
+ description:
+ - The gcp_service_account email in order to enable tiering of cold data to Google Cloud Storage.
+ required: true
+ type: str
+
+ gcp_volume_size:
+ description:
+ - GCP volume size.
+ type: int
+
+ gcp_volume_size_unit:
+ description:
+ - GCP volume size unit.
+ choices: ['GB', 'TB']
+ type: str
+
+ gcp_volume_type:
+ description:
+ - GCP volume type.
+ choices: ['pd-balanced', 'pd-standard', 'pd-ssd']
+ type: str
+
+ instance_type:
+ description:
+ - The type of instance to use, which depends on the license type you choose.
+ - Explore ['custom-4-16384'].
+ - Standard ['n1-standard-8'].
+ - Premium ['n1-standard-32'].
+ - BYOL all instance types defined for PayGo.
+ - For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
+ default: 'n1-standard-8'
+ type: str
+
+ is_ha:
+ description:
+ - Indicate whether the working environment is an HA pair or not.
+ type: bool
+ default: false
+
+ license_type:
+ description:
+ - The type of license to use.
+ - For single node by Capacity ['capacity-paygo'].
+ - For single node by Node paygo ['gcp-cot-explore-paygo', 'gcp-cot-standard-paygo', 'gcp-cot-premium-paygo'].
+ - For single node by Node byol ['gcp-cot-premium-byol'].
+ - For HA by Capacity ['ha-capacity-paygo'].
+ - For HA by Node paygo ['gcp-ha-cot-explore-paygo', 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo'].
      - For HA by Node byol ['gcp-ha-cot-premium-byol'].
+ choices: ['gcp-cot-standard-paygo', 'gcp-cot-explore-paygo', 'gcp-cot-premium-paygo', 'gcp-cot-premium-byol', \
+ 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo', 'gcp-ha-cot-explore-paygo', 'gcp-ha-cot-premium-byol', \
+ 'capacity-paygo', 'ha-capacity-paygo']
+ type: str
+ default: 'capacity-paygo'
+
+ provided_license:
+ description:
      - Using an NLF license file for BYOL deployment.
+ type: str
+
+ capacity_package_name:
+ description:
+ - Capacity package name is required when selecting a capacity based license.
+ choices: ['Professional', 'Essential', 'Freemium']
+ default: 'Essential'
+ type: str
+ version_added: 21.12.0
+
+ mediator_zone:
+ description:
+ - The zone for mediator.
+ - Option for HA pair only.
+ type: str
+
+ name:
+ description:
+ - The name of the Cloud Manager CVO for GCP to manage.
+ required: true
+ type: str
+
+ network_project_id:
+ description:
+ - The project id in GCP associated with the Subnet.
+ - If not provided, it is assumed that the Subnet is within the previously specified project id.
+ type: str
+
+ node1_zone:
+ description:
+ - Zone for node 1.
+ - Option for HA pair only.
+ type: str
+
+ node2_zone:
+ description:
+ - Zone for node 2.
+ - Option for HA pair only.
+ type: str
+
+ nss_account:
+ description:
+ - The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
+ - If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
+ type: str
+
+ ontap_version:
+ description:
+ - The required ONTAP version. Ignored if 'use_latest_version' is set to true.
+ type: str
+ default: 'latest'
+
+ platform_serial_number_node1:
+ description:
+ - For HA BYOL, the serial number for the first node.
+ - Option for HA pair only.
+ type: str
+
+ platform_serial_number_node2:
+ description:
+ - For HA BYOL, the serial number for the second node.
+ - Option for HA pair only.
+ type: str
+
+ project_id:
+ description:
+ - The ID of the GCP project.
+ required: true
+ type: str
+
+ platform_serial_number:
+ description:
+ - The serial number for the system. Required when using 'gcp-cot-premium-byol'.
+ type: str
+
+ state:
+ description:
+ - Whether the specified Cloud Manager CVO for GCP should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ subnet_id:
+ description:
+ - The name of the subnet for Cloud Volumes ONTAP.
+ type: str
+
+ subnet0_node_and_data_connectivity:
+ description:
+ - Subnet path for nic1, required for node and data connectivity.
+ - If using shared VPC, network_project_id must be provided.
+ - Option for HA pair only.
+ type: str
+
+ subnet1_cluster_connectivity:
+ description:
+ - Subnet path for nic2, required for cluster connectivity.
+ - Option for HA pair only.
+ type: str
+
+ subnet2_ha_connectivity:
+ description:
+ - Subnet path for nic3, required for HA connectivity.
+ - Option for HA pair only.
+ type: str
+
+ subnet3_data_replication:
+ description:
      - Subnet path for nic4, required for data replication.
+ - Option for HA pair only.
+ type: str
+
+ svm_password:
+ description:
+ - The admin password for Cloud Volumes ONTAP.
+ - It will be updated on each run.
+ type: str
+
+ svm_name:
+ description:
+ - The name of the SVM.
+ type: str
+ version_added: 21.22.0
+
+ tier_level:
+ description:
+ - The tiering level when 'capacity_tier' is set to 'cloudStorage'.
+ choices: ['standard', 'nearline', 'coldline']
+ default: 'standard'
+ type: str
+
+ use_latest_version:
+ description:
+ - Indicates whether to use the latest available ONTAP version.
+ type: bool
+ default: true
+
+ vpc_id:
+ required: true
+ description:
+ - The name of the VPC.
+ type: str
+
+ vpc0_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc1.
+ - Option for HA pair only.
+ type: str
+
+ vpc0_node_and_data_connectivity:
+ description:
+ - VPC path for nic1, required for node and data connectivity.
+ - If using shared VPC, network_project_id must be provided.
+ - Option for HA pair only.
+ type: str
+
+ vpc1_cluster_connectivity:
+ description:
+ - VPC path for nic2, required for cluster connectivity.
+ - Option for HA pair only.
+ type: str
+
+ vpc1_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc2.
+ - Option for HA pair only.
+ type: str
+
+ vpc2_ha_connectivity:
+ description:
+ - VPC path for nic3, required for HA connectivity.
+ - Option for HA pair only.
+ type: str
+
+ vpc2_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc3.
+ - Option for HA pair only.
+ type: str
+
+ vpc3_data_replication:
+ description:
+ - VPC path for nic4, required for data replication.
+ - Option for HA pair only.
+ type: str
+
+ vpc3_firewall_rule_name:
+ description:
+ - Firewall rule name for vpc4.
+ - Option for HA pair only.
+ type: str
+
+ workspace_id:
+ description:
+ - The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
+ - If not provided, Cloud Manager uses the first workspace.
      - You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com).
+ type: str
+
+ writing_speed_state:
+ description:
+ - The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
+ - Default value is 'NORMAL' for non-HA GCP CVO
+ - This argument is not relevant for HA pairs.
+ type: str
+
+ zone:
+ description:
+ - The zone of the region where the working environment will be created.
+ required: true
+ type: str
+
+ upgrade_ontap_version:
+ description:
+ - Indicates whether to upgrade ONTAP image on the CVO.
+ - If the current version already matches the desired version, no action is taken.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ update_svm_password:
+ description:
+ - Indicates whether to update svm_password on the CVO.
+ - When set to true, the module is not idempotent, as we cannot read the current password.
+ type: bool
+ default: false
+ version_added: 21.13.0
+
+ subnet_path:
+ description:
+ - Subnet path for a single node cluster.
+ type: str
+ version_added: 21.20.0
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = """
+
+- name: Create NetApp Cloud Manager cvo for GCP
+ netapp.cloudmanager.na_cloudmanager_cvo_gcp:
+ state: present
+ name: ansiblecvogcp
+ project_id: default-project
+ zone: us-east4-b
+ subnet_path: projects/<project>/regions/<region>/subnetworks/<subnetwork>
+ subnet_id: projects/<project>/regions/<region>/subnetworks/<subnetwork>
+ gcp_volume_type: pd-ssd
+ gcp_volume_size: 500
+ gcp_volume_size_unit: GB
+ gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
+ data_encryption_type: GCP
+ svm_password: "{{ xxxxxxxxxxxxxxx }}"
+ ontap_version: latest
+ use_latest_version: true
+ license_type: capacity-paygo
+ instance_type: n1-standard-8
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ workspace_id: "{{ xxxxxxxxxxxxxxx }}"
+ capacity_tier: cloudStorage
+ writing_speed_state: NORMAL
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ vpc_id: default
+ gcp_labels:
+ - label_key: key1
+ label_value: value1
+ - label_key: key2
+ label_value: value2
+
+- name: Create NetApp Cloud Manager cvo ha for GCP
+ netapp.cloudmanager.na_cloudmanager_cvo_gcp:
+ state: present
+ name: ansiblecvogcpha
+ project_id: "default-project"
+ zone: us-east1-b
+ gcp_volume_type: pd-ssd
+ gcp_volume_size: 500
+ gcp_volume_size_unit: GB
+ gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
+ data_encryption_type: GCP
+ svm_password: "{{ xxxxxxxxxxxxxxx }}"
+ ontap_version: ONTAP-9.9.0.T1.gcpha
+ use_latest_version: false
+ license_type: ha-capacity-paygo
+ instance_type: custom-4-16384
+ client_id: "{{ xxxxxxxxxxxxxxx }}"
+ workspace_id: "{{ xxxxxxxxxxxxxxx }}"
+ capacity_tier: cloudStorage
+ writing_speed_state: NORMAL
+ refresh_token: "{{ xxxxxxxxxxxxxxx }}"
+ is_ha: true
+ mediator_zone: us-east1-b
+ node1_zone: us-east1-b
+ node2_zone: us-east1-b
+ subnet0_node_and_data_connectivity: default
+ subnet1_cluster_connectivity: subnet2
+ subnet2_ha_connectivity: subnet3
+ subnet3_data_replication: subnet1
+ vpc0_node_and_data_connectivity: default
+ vpc1_cluster_connectivity: vpc2
+ vpc2_ha_connectivity: vpc3
+ vpc3_data_replication: vpc1
+ vpc_id: default
+ subnet_id: default
+
+"""
+
+RETURN = '''
+working_environment_id:
+ description: Newly created GCP CVO working_environment_id.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
# License types accepted by the GCP CVO API: single-node and HA flavors,
# both node-based (paygo/byol) and capacity-based.
GCP_LICENSE_TYPES = ["gcp-cot-standard-paygo", "gcp-cot-explore-paygo", "gcp-cot-premium-paygo", "gcp-cot-premium-byol",
                     "gcp-ha-cot-standard-paygo", "gcp-ha-cot-premium-paygo", "gcp-ha-cot-explore-paygo",
                     "gcp-ha-cot-premium-byol", "capacity-paygo", "ha-capacity-paygo"]
# Base URL used to build full self-links for VPC/subnet resources in HA deployments.
GOOGLE_API_URL = "https://www.googleapis.com/compute/v1/projects"
+
+
+class NetAppCloudManagerCVOGCP:
+ ''' object initialize and class methods '''
+
    def __init__(self):
        """Set up the module argument spec, REST client, and request headers."""
        self.use_rest = False
        # start from the shared cloudmanager auth options (refresh_token, sa_* ...)
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
            capacity_tier=dict(required=False, type='str', choices=['cloudStorage']),
            client_id=dict(required=True, type='str'),
            data_encryption_type=dict(required=False, choices=['GCP'], type='str'),
            gcp_encryption_parameters=dict(required=False, type='str', no_log=True),
            enable_compliance=dict(required=False, type='bool', default=False),
            firewall_rule=dict(required=False, type='str'),
            gcp_labels=dict(required=False, type='list', elements='dict', options=dict(
                label_key=dict(type='str', no_log=False),
                label_value=dict(type='str')
            )),
            gcp_service_account=dict(required=True, type='str'),
            gcp_volume_size=dict(required=False, type='int'),
            gcp_volume_size_unit=dict(required=False, choices=['GB', 'TB'], type='str'),
            gcp_volume_type=dict(required=False, choices=['pd-balanced', 'pd-standard', 'pd-ssd'], type='str'),
            instance_type=dict(required=False, type='str', default='n1-standard-8'),
            is_ha=dict(required=False, type='bool', default=False),
            license_type=dict(required=False, type='str', choices=GCP_LICENSE_TYPES, default='capacity-paygo'),
            mediator_zone=dict(required=False, type='str'),
            name=dict(required=True, type='str'),
            network_project_id=dict(required=False, type='str'),
            node1_zone=dict(required=False, type='str'),
            node2_zone=dict(required=False, type='str'),
            nss_account=dict(required=False, type='str'),
            ontap_version=dict(required=False, type='str', default='latest'),
            platform_serial_number=dict(required=False, type='str'),
            platform_serial_number_node1=dict(required=False, type='str'),
            platform_serial_number_node2=dict(required=False, type='str'),
            project_id=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            subnet_id=dict(required=False, type='str'),
            subnet0_node_and_data_connectivity=dict(required=False, type='str'),
            subnet1_cluster_connectivity=dict(required=False, type='str'),
            subnet2_ha_connectivity=dict(required=False, type='str'),
            subnet3_data_replication=dict(required=False, type='str'),
            svm_password=dict(required=False, type='str', no_log=True),
            svm_name=dict(required=False, type='str'),
            tier_level=dict(required=False, type='str', choices=['standard', 'nearline', 'coldline'],
                            default='standard'),
            use_latest_version=dict(required=False, type='bool', default=True),
            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
            provided_license=dict(required=False, type='str'),
            vpc_id=dict(required=True, type='str'),
            vpc0_firewall_rule_name=dict(required=False, type='str'),
            vpc0_node_and_data_connectivity=dict(required=False, type='str'),
            vpc1_cluster_connectivity=dict(required=False, type='str'),
            vpc1_firewall_rule_name=dict(required=False, type='str'),
            vpc2_firewall_rule_name=dict(required=False, type='str'),
            vpc2_ha_connectivity=dict(required=False, type='str'),
            vpc3_data_replication=dict(required=False, type='str'),
            vpc3_firewall_rule_name=dict(required=False, type='str'),
            workspace_id=dict(required=False, type='str'),
            writing_speed_state=dict(required=False, type='str'),
            zone=dict(required=True, type='str'),
            upgrade_ontap_version=dict(required=False, type='bool', default=False),
            update_svm_password=dict(required=False, type='bool', default=False),
            subnet_path=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # BYOL licenses need serial numbers; capacity licenses need a package name
            required_if=[
                ['license_type', 'capacity-paygo', ['capacity_package_name']],
                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
                ['license_type', 'gcp-cot-premium-byol', ['platform_serial_number']],
                ['license_type', 'gcp-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # attributes that can be updated in place on an existing CVO
        self.changeable_params = ['svm_password', 'svm_name', 'tier_level', 'gcp_labels', 'ontap_version',
                                  'instance_type', 'license_type', 'writing_speed_state']
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # HA and single-node CVOs live under different API roots
        self.rest_api.api_root_path = '/occm/api/gcp/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
+
+ @staticmethod
+ def has_self_link(param):
+ return param.startswith(("https://www.googleapis.com/compute/", "projects/"))
+
+ def create_cvo_gcp(self):
+
+ if self.parameters.get('workspace_id') is None:
+ response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['workspace_id'] = response
+
+ if self.parameters.get('nss_account') is None:
+ if self.parameters.get('platform_serial_number') is not None:
+ if not self.parameters['platform_serial_number'].startswith('Eval-'):
+ if self.parameters['license_type'] == 'gcp-cot-premium-byol' or self.parameters['license_type'] == 'gcp-ha-cot-premium-byol':
+ response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
+ if response is None:
+ self.module.fail_json(msg)
+ self.parameters['nss_account'] = response
+
+ if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
+ self.parameters['license_type'] == 'ha-capacity-paygo'
+
+ json = {"name": self.parameters['name'],
+ "region": self.parameters['zone'],
+ "tenantId": self.parameters['workspace_id'],
+ "vpcId": self.parameters['vpc_id'],
+ "gcpServiceAccount": self.parameters['gcp_service_account'],
+ "gcpVolumeSize": {
+ "size": self.parameters['gcp_volume_size'],
+ "unit": self.parameters['gcp_volume_size_unit']},
+ "gcpVolumeType": self.parameters['gcp_volume_type'],
+ "svmPassword": self.parameters['svm_password'],
+ "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
+ "enableCompliance": self.parameters['enable_compliance'],
+ "vsaMetadata": {
+ "ontapVersion": self.parameters['ontap_version'],
+ "licenseType": self.parameters['license_type'],
+ "useLatestVersion": self.parameters['use_latest_version'],
+ "instanceType": self.parameters['instance_type']}
+ }
+
+ if self.parameters['is_ha'] is False:
+ if self.parameters.get('writing_speed_state') is None:
+ self.parameters['writing_speed_state'] = 'NORMAL'
+ json.update({'writingSpeedState': self.parameters['writing_speed_state'].upper()})
+
+ if self.parameters.get('data_encryption_type') is not None and self.parameters['data_encryption_type'] == "GCP":
+ json.update({'dataEncryptionType': self.parameters['data_encryption_type']})
+ if self.parameters.get('gcp_encryption_parameters') is not None:
+ json.update({"gcpEncryptionParameters": {"key": self.parameters['gcp_encryption_parameters']}})
+
+ if self.parameters.get('provided_license') is not None:
+ json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
+
+ # clean default value if it is not by Capacity license
+ if not self.parameters['license_type'].endswith('capacity-paygo'):
+ json['vsaMetadata'].update({"capacityPackageName": ''})
+
+ if self.parameters.get('capacity_package_name') is not None:
+ json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
+
+ if self.parameters.get('project_id'):
+ json.update({'project': self.parameters['project_id']})
+
+ if self.parameters.get('nss_account'):
+ json.update({'nssAccount': self.parameters['nss_account']})
+
+ if self.parameters.get('subnet_id'):
+ json.update({'subnetId': self.parameters['subnet_id']})
+
+ if self.parameters.get('subnet_path'):
+ json.update({'subnetPath': self.parameters['subnet_path']})
+
+ if self.parameters.get('platform_serial_number') is not None:
+ json.update({"serialNumber": self.parameters['platform_serial_number']})
+
+ if self.parameters.get('capacity_tier') is not None and self.parameters['capacity_tier'] == "cloudStorage":
+ json.update({"capacityTier": self.parameters['capacity_tier'],
+ "tierLevel": self.parameters['tier_level']})
+
+ if self.parameters.get('svm_name') is not None:
+ json.update({"svmName": self.parameters['svm_name']})
+
+ if self.parameters.get('gcp_labels') is not None:
+ labels = []
+ for each_label in self.parameters['gcp_labels']:
+ label = {
+ 'labelKey': each_label['label_key'],
+ 'labelValue': each_label['label_value']
+ }
+
+ labels.append(label)
+ json.update({"gcpLabels": labels})
+
+ if self.parameters.get('firewall_rule'):
+ json.update({'firewallRule': self.parameters['firewall_rule']})
+
+ if self.parameters['is_ha'] is True:
+ ha_params = dict()
+
+ if self.parameters.get('network_project_id') is not None:
+ network_project_id = self.parameters.get('network_project_id')
+ else:
+ network_project_id = self.parameters['project_id']
+
+ if not self.has_self_link(self.parameters['subnet_id']):
+ json.update({'subnetId': 'projects/%s/regions/%s/subnetworks/%s' % (network_project_id,
+ self.parameters['zone'][:-2],
+ self.parameters['subnet_id'])})
+
+ if self.parameters.get('platform_serial_number_node1'):
+ ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
+
+ if self.parameters.get('platform_serial_number_node2'):
+ ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
+
+ if self.parameters.get('node1_zone'):
+ ha_params["node1Zone"] = self.parameters['node1_zone']
+
+ if self.parameters.get('node2_zone'):
+ ha_params["node2Zone"] = self.parameters['node2_zone']
+
+ if self.parameters.get('mediator_zone'):
+ ha_params["mediatorZone"] = self.parameters['mediator_zone']
+
+ if self.parameters.get('vpc0_node_and_data_connectivity'):
+ if self.has_self_link(self.parameters['vpc0_node_and_data_connectivity']):
+ ha_params["vpc0NodeAndDataConnectivity"] = self.parameters['vpc0_node_and_data_connectivity']
+ else:
+ ha_params["vpc0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc0_node_and_data_connectivity'])
+
+ if self.parameters.get('vpc1_cluster_connectivity'):
+ if self.has_self_link(self.parameters['vpc1_cluster_connectivity']):
+ ha_params["vpc1ClusterConnectivity"] = self.parameters['vpc1_cluster_connectivity']
+ else:
+ ha_params["vpc1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc1_cluster_connectivity'])
+
+ if self.parameters.get('vpc2_ha_connectivity'):
+ if self.has_self_link(self.parameters['vpc2_ha_connectivity']):
+ ha_params["vpc2HAConnectivity"] = self.parameters['vpc2_ha_connectivity']
+ else:
+ ha_params["vpc2HAConnectivity"] = "https://www.googleapis.com/compute/v1/projects/{0}/global/networks" \
+ "/{1}".format(network_project_id, self.parameters['vpc2_ha_connectivity'])
+
+ if self.parameters.get('vpc3_data_replication'):
+ if self.has_self_link(self.parameters['vpc3_data_replication']):
+ ha_params["vpc3DataReplication"] = self.parameters['vpc3_data_replication']
+ else:
+ ha_params["vpc3DataReplication"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
+ network_project_id, self.parameters['vpc3_data_replication'])
+
+ if self.parameters.get('subnet0_node_and_data_connectivity'):
+ if self.has_self_link(self.parameters['subnet0_node_and_data_connectivity']):
+ ha_params["subnet0NodeAndDataConnectivity"] = self.parameters['subnet0_node_and_data_connectivity']
+ else:
+ ha_params["subnet0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".\
+ format(network_project_id, self.parameters['zone'][:-2], self.parameters['subnet0_node_and_data_connectivity'])
+
+ if self.parameters.get('subnet1_cluster_connectivity'):
+ if self.has_self_link(self.parameters['subnet1_cluster_connectivity']):
+ ha_params["subnet1ClusterConnectivity"] = self.parameters['subnet1_cluster_connectivity']
+ else:
+ ha_params["subnet1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
+ network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet1_cluster_connectivity'])
+
+ if self.parameters.get('subnet2_ha_connectivity'):
+ if self.has_self_link(self.parameters['subnet2_ha_connectivity']):
+ ha_params["subnet2HAConnectivity"] = self.parameters['subnet2_ha_connectivity']
+ else:
+ ha_params["subnet2HAConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
+ network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet2_ha_connectivity'])
+
+ if self.parameters.get('subnet3_data_replication'):
+ if self.has_self_link(self.parameters['subnet3_data_replication']):
+ ha_params["subnet3DataReplication"] = self.parameters['subnet3_data_replication']
+ else:
+ ha_params["subnet3DataReplication"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}". \
+ format(network_project_id, self.parameters['zone'][:-2],
+ self.parameters['subnet3_data_replication'])
+
+ if self.parameters.get('vpc0_firewall_rule_name'):
+ ha_params["vpc0FirewallRuleName"] = self.parameters['vpc0_firewall_ruleName']
+
+ if self.parameters.get('vpc1_firewall_rule_name'):
+ ha_params["vpc1FirewallRuleName"] = self.parameters['vpc1_firewall_rule_name']
+
+ if self.parameters.get('vpc2_firewall_rule_name'):
+ ha_params["vpc2FirewallRuleName"] = self.parameters['vpc2_firewall_rule_name']
+
+ if self.parameters.get('vpc3_firewall_rule_name'):
+ ha_params["vpc3FirewallRuleName"] = self.parameters['vpc3_firewall_rule_name']
+
+ json["haParams"] = ha_params
+
+ api_url = '%s/working-environments' % self.rest_api.api_root_path
+ response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
+ if error is not None:
+ self.module.fail_json(
+ msg="Error: unexpected response on creating cvo gcp: %s, %s" % (str(error), str(response)))
+ working_environment_id = response['publicId']
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
+
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO GCP: %s" % str(err))
+ return working_environment_id
+
+ def update_cvo_gcp(self, working_environment_id, modify):
+ base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
+ for item in modify:
+ if item == 'svm_password':
+ response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'svm_name':
+ response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'gcp_labels':
+ tag_list = None
+ if 'gcp_labels' in self.parameters:
+ tag_list = self.parameters['gcp_labels']
+ response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'gcp_labels', tag_list)
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'tier_level':
+ response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'writing_speed_state':
+ response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'ontap_version':
+ response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+ if item == 'instance_type' or item == 'license_type':
+ response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
+ self.parameters['instance_type'],
+ self.parameters['license_type'])
+ if error is not None:
+ self.module.fail_json(changed=False, msg=error)
+
+ def delete_cvo_gcp(self, we_id):
+ """
+ Delete GCP CVO
+ """
+ api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
+ response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
+ if error is not None:
+ self.module.fail_json(msg="Error: unexpected response on deleting cvo gcp: %s, %s" % (str(error), str(response)))
+
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
+ if err is not None:
+ self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting cvo gcp: %s" % str(err))
+
    def apply(self):
        """
        Create, delete, or update the CVO GCP working environment to match the desired state.

        Looks up the working environment by name, decides create/delete via the helper's
        get_cd_action, otherwise computes the list of updatable differences, then applies
        the action (unless check_mode) and exits with changed status and the environment id.
        """
        working_environment_id = None
        modify = None

        # Look up an existing working environment by name, scoped to the gcp provider.
        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
                                                                               self.parameters['name'], "gcp")
        if current:
            self.parameters['working_environment_id'] = current['publicId']
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)

        if current and self.parameters['state'] != 'absent':
            working_environment_id = current['publicId']
            # Determine which of the changeable parameters differ from the live environment.
            # NOTE(review): this helper presumably also sets na_helper.changed — confirm.
            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'gcp')
            if error is not None:
                self.module.fail_json(changed=False, msg=error)

        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                working_environment_id = self.create_cvo_gcp()
            elif cd_action == "delete":
                self.delete_cvo_gcp(current['publicId'])
            else:
                # Neither create nor delete: push the computed modifications.
                self.update_cvo_gcp(current['publicId'], modify)

        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
+
+
def main():
    """
    Create Cloud Manager CVO for GCP class instance and invoke apply
    :return: None
    """
    cvo = NetAppCloudManagerCVOGCP()
    cvo.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py
new file mode 100644
index 000000000..cbdf64f13
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_info.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_info
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_info
+short_description: NetApp Cloud Manager info
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.4.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+ - This module allows you to gather various information about cloudmanager using REST APIs.
+
+options:
+ client_id:
+ required: true
+ type: str
+ description:
+ - The connector ID of the Cloud Manager Connector.
+
+ gather_subsets:
+ type: list
+ elements: str
+ description:
+ - When supplied, this argument will restrict the information collected to a given subset.
+ - Possible values for this argument include
+ - 'working_environments_info'
+ - 'aggregates_info'
+ - 'accounts_info'
+ - 'account_info'
+ - 'agents_info'
+ - 'active_agents_info'
+ default: 'all'
+
+notes:
+- Support check_mode
+'''
+
+EXAMPLES = """
+- name: Get all available subsets
+ netapp.cloudmanager.na_cloudmanager_info:
+ client_id: "{{ client_id }}"
+ refresh_token: "{{ refresh_token }}"
+ gather_subsets:
+ - all
+
+- name: Collect data for cloud manager with indicated subsets
+ netapp.cloudmanager.na_cloudmanager_info:
+ client_id: "{{ client_id }}"
+ refresh_token: "{{ refresh_token }}"
+ gather_subsets:
+ - aggregates_info
+ - working_environments_info
+"""
+
+RETURN = """
+info:
+ description:
+ - a dictionary of collected subsets
+ - each subset if in JSON format
+ returned: success
+ type: dict
+ sample: '{
+ "info": {
+ "working_environments_info": [
+ {
+ "azureVsaWorkingEnvironments": [],
+ "gcpVsaWorkingEnvironments": [],
+ "onPremWorkingEnvironments": [],
+ "vsaWorkingEnvironments": [
+ {
+ "actionsRequired": null,
+ "activeActions": null,
+ "awsProperties": null,
+ "capacityFeatures": null,
+ "cbsProperties": null,
+ "cloudProviderName": "Amazon",
+ "cloudSyncProperties": null,
+ "clusterProperties": null,
+ "complianceProperties": null,
+ "creatorUserEmail": "samlp|NetAppSAML|test_user",
+ "cronJobSchedules": null,
+ "encryptionProperties": null,
+ "fpolicyProperties": null,
+ "haProperties": null,
+ "interClusterLifs": null,
+ "isHA": false,
+ "k8sProperties": null,
+ "monitoringProperties": null,
+ "name": "testAWS",
+ "ontapClusterProperties": null,
+ "publicId": "VsaWorkingEnvironment-3txYJOsX",
+ "replicationProperties": null,
+ "reservedSize": null,
+ "saasProperties": null,
+ "schedules": null,
+ "snapshotPolicies": null,
+ "status": null,
+ "supportRegistrationInformation": [],
+ "supportRegistrationProperties": null,
+ "supportedFeatures": null,
+ "svmName": "svm_testAWS",
+ "svms": null,
+ "tenantId": "Tenant-2345",
+ "workingEnvironmentType": "VSA"
+ }
+ ]
+ },
+ null
+ ]
+ }
+ }'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
class NetAppCloudmanagerInfo(object):
    '''
    Contains methods to parse arguments,
    derive details of CloudmanagerInfo objects
    and send requests to CloudmanagerInfo via
    the restApi
    '''

    def __init__(self):
        # Shared cloudmanager options (refresh_token, sa_client_id, sa_secret_key, ...)
        # plus this module's own options.
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            gather_subsets=dict(type='list', elements='str', default='all'),
            client_id=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )

        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # The API root depends on the working environment type; set later per request.
        self.rest_api.api_root_path = None
        # Dispatch table: gather_subset name -> callable(rest_api, headers).
        self.methods = dict(
            working_environments_info=self.na_helper.get_working_environments_info,
            aggregates_info=self.get_aggregates_info,
            accounts_info=self.na_helper.get_accounts_info,
            account_info=self.na_helper.get_account_info,
            agents_info=self.na_helper.get_agents_info,
            active_agents_info=self.na_helper.get_active_agents_info,
        )
        self.headers = {}
        # Route requests through the given connector (agent) when provided.
        if 'client_id' in self.parameters:
            self.headers['X-Agent-Id'] = self.rest_api.format_client_id(self.parameters['client_id'])

    def get_aggregates_info(self, rest_api, headers):
        '''
        Get aggregates info: there are 4 types of working environments.
        Each of the aggregates will be categorized by working environment type and working environment id
        '''
        aggregates = {}
        # get list of working environments
        working_environments, error = self.na_helper.get_working_environments_info(rest_api, headers)
        if error is not None:
            self.module.fail_json(msg="Error: Failed to get working environments: %s" % str(error))
        # Four types of working environments:
        # azureVsaWorkingEnvironments, gcpVsaWorkingEnvironments, onPremWorkingEnvironments, vsaWorkingEnvironments
        for working_env_type in working_environments:
            we_aggregates = {}
            # get aggregates for each working environment
            for we in working_environments[working_env_type]:
                provider = we['cloudProviderName']
                working_environment_id = we['publicId']
                # api_root_path differs per working environment type.
                self.na_helper.set_api_root_path(we, rest_api)
                # The aggregates endpoint shape differs between AWS and the other providers.
                if provider != "Amazon":
                    api = '%s/aggregates/%s' % (rest_api.api_root_path, working_environment_id)
                else:
                    api = '%s/aggregates?workingEnvironmentId=%s' % (rest_api.api_root_path, working_environment_id)
                response, error, dummy = rest_api.get(api, None, header=headers)
                if error:
                    self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
                we_aggregates[working_environment_id] = response
            aggregates[working_env_type] = we_aggregates
        return aggregates

    def get_info(self, func, rest_api):
        '''
        Main get info function: dispatch *func* through the methods table.
        '''
        return self.methods[func](rest_api, self.headers)

    def apply(self):
        '''
        Apply action to the Cloud Manager
        :return: None
        '''
        info = {}
        # 'all' expands to every registered subset.
        if 'all' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.methods.keys()
        for func in self.parameters['gather_subsets']:
            if func in self.methods:
                info[func] = self.get_info(func, self.rest_api)
            else:
                msg = '%s is not a valid gather_subset. Only %s are allowed' % (func, self.methods.keys())
                self.module.fail_json(msg=msg)
        self.module.exit_json(changed=False, info=info)
+
+
def main():
    '''
    Instantiate the info module object and gather the requested subsets.
    '''
    gatherer = NetAppCloudmanagerInfo()
    gatherer.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py
new file mode 100644
index 000000000..49e8e697e
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_nss_account.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_nss_account
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_nss_account
+short_description: NetApp Cloud Manager nss account
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create and Delete nss account.
+
+options:
+ state:
+ description:
+ - Whether the specified nss account should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ public_id:
+ description:
+ - The ID of the NSS account.
+ type: str
+
+ name:
+ description:
+ - The name of the NSS account.
+ type: str
+
+ username:
+ description:
+ - The NSS username.
+ required: true
+ type: str
+
+ password:
+ description:
+ - The NSS password.
+ type: str
+
+ vsa_list:
+ description:
+ - The working environment list.
+ type: list
+ elements: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create nss account
+ netapp.cloudmanager.na_cloudmanager_nss_account:
+ state: present
+ name: test_cloud
+ username: test_cloud
+ password: password
+ client_id: your_client_id
+ refresh_token: your_refresh_token
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
class NetAppCloudmanagerNssAccount(object):
    """Create or delete a NetApp Support Site (NSS) account registered in Cloud Manager."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=False, type='str', no_log=True),
            public_id=dict(required=False, type='str'),
            vsa_list=dict(required=False, type='list', elements='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # password is only needed when creating the account.
            required_if=[
                ('state', 'present', ['password']),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = '/occm/api/'
        # Route all requests through the given connector (agent).
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_nss_account(self):
        """Return the NSS account matching self.parameters['username'], or None.

        Side effect: fills in self.parameters['public_id'] from the match, and fails
        the module if a caller-supplied public_id disagrees with the matched account.
        """
        response, err, dummy = self.rest_api.send_request("GET", "%s/accounts" % (
            self.rest_api.api_root_path), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on getting nss account: %s, %s" % (str(err), str(response)))
        if response is None:
            return None
        nss_accounts = []
        if response.get('nssAccounts'):
            nss_accounts = response['nssAccounts']
        if len(nss_accounts) == 0:
            return None
        result = dict()
        for account in nss_accounts:
            if account['nssUserName'] == self.parameters['username']:
                if self.parameters.get('public_id') and self.parameters['public_id'] != account['publicId']:
                    self.module.fail_json(changed=False, msg="Error: public_id '%s' does not match username."
                                          % account['publicId'])
                else:
                    self.parameters['public_id'] = account['publicId']
                    result['name'] = account['accountName']
                    result['user_name'] = account['nssUserName']
                    result['vsa_list'] = account['vsaList']
                return result
        return None

    def create_nss_account(self):
        """POST a new NSS account with the given credentials and optional VSA list."""
        account = dict()
        if self.parameters.get('name'):
            account['accountName'] = self.parameters['name']
        account['providerKeys'] = {'nssUserName': self.parameters['username'],
                                   'nssPassword': self.parameters['password']}
        account['vsaList'] = []
        if self.parameters.get('vsa_list'):
            account['vsaList'] = self.parameters['vsa_list']
        response, err, second_dummy = self.rest_api.send_request("POST", "%s/accounts/nss" % (
            self.rest_api.api_root_path), None, account, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on creating nss account: %s, %s" % (str(err), str(response)))

    def delete_nss_account(self):
        """DELETE the NSS account identified by self.parameters['public_id']."""
        response, err, second_dummy = self.rest_api.send_request("DELETE", "%s/accounts/%s" % (
            self.rest_api.api_root_path, self.parameters['public_id']), None, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on deleting nss account: %s, %s" % (str(err), str(response)))
        return None

    def apply(self):
        """Create or delete the account to match the desired state; report changed."""
        current = self.get_nss_account()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_nss_account()
            elif cd_action == 'delete':
                self.delete_nss_account()
        self.module.exit_json(changed=self.na_helper.changed)
+
+
def main():
    '''Main Function'''
    nss_account_module = NetAppCloudmanagerNssAccount()
    nss_account_module.apply()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py
new file mode 100644
index 000000000..299e13ecf
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_snapmirror.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+
+# (c) 2021, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_snapmirror
+'''
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_snapmirror
+short_description: NetApp Cloud Manager SnapMirror
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.6.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create or Delete SnapMirror relationship on Cloud Manager.
+
+options:
+
+ state:
+ description:
+ - Whether the specified snapmirror relationship should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ source_working_environment_name:
+ description:
+ - The working environment name of the source volume.
+ type: str
+
+ destination_working_environment_name:
+ description:
+ - The working environment name of the destination volume.
+ type: str
+
+ source_working_environment_id:
+ description:
+ - The public ID of the working environment of the source volume.
+ type: str
+
+ destination_working_environment_id:
+ description:
+ - The public ID of the working environment of the destination volume.
+ type: str
+
+ destination_aggregate_name:
+ description:
+ - The aggregate in which the volume will be created.
+ - If not provided, Cloud Manager chooses the best aggregate for you.
+ type: str
+
+ policy:
+ description:
+ - The SnapMirror policy name.
+ type: str
+ default: 'MirrorAllSnapshots'
+
+ max_transfer_rate:
+ description:
+ - Maximum transfer rate limit KB/s.
+ - Use 0 for no limit, otherwise use number between 1024 and 2,147,482,624.
+ type: int
+ default: 100000
+
+ source_svm_name:
+ description:
+ - The name of the source SVM.
+ - The default SVM name is used, if a name is not provided.
+ type: str
+
+ destination_svm_name:
+ description:
+ - The name of the destination SVM.
+ - The default SVM name is used, if a name is not provided.
+ type: str
+
+ source_volume_name:
+ description:
+ - The name of the source volume.
+ required: true
+ type: str
+
+ destination_volume_name:
+ description:
+ - The name of the destination volume to be created for snapmirror relationship.
+ required: true
+ type: str
+
+ schedule:
+ description:
+ - The name of the Schedule.
+ type: str
+ default: '1hour'
+
+ provider_volume_type:
+ description:
+ - The underlying cloud provider volume type.
+ - For AWS ['gp3', 'gp2', 'io1', 'st1', 'sc1'].
+ - For Azure ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
+ - For GCP ['pd-balanced','pd-ssd','pd-standard'].
+ type: str
+
+ capacity_tier:
+ description:
+ - The volume capacity tier for tiering cold data to object storage.
+ - The default values for each cloud provider are as follows, Amazon 'S3', Azure 'Blob', GCP 'cloudStorage'.
+ - If NONE, the capacity tier will not be set on volume creation.
+ type: str
+ choices: ['S3', 'Blob', 'cloudStorage', 'NONE']
+
+ tenant_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
+ type: str
+ version_added: 21.14.0
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create snapmirror with working_environment_name
+ netapp.cloudmanager.na_cloudmanager_snapmirror:
+ state: present
+ source_working_environment_name: source
+ destination_working_environment_name: dest
+ source_volume_name: source
+ destination_volume_name: source_copy
+ policy: MirrorAllSnapshots
+ schedule: 5min
+ max_transfer_rate: 102400
+ client_id: client_id
+ refresh_token: refresh_token
+
+- name: Delete snapmirror
+ netapp.cloudmanager.na_cloudmanager_snapmirror:
+ state: absent
+ source_working_environment_name: source
+ destination_working_environment_name: dest
+ source_volume_name: source
+ destination_volume_name: source_copy
+ client_id: client_id
+ refresh_token: refresh_token
+'''
+
+RETURN = r''' # '''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
+
+
+PROVIDER_TO_CAPACITY_TIER = {'amazon': 'S3', 'azure': 'Blob', 'gcp': 'cloudStorage'}
+
+
+class NetAppCloudmanagerSnapmirror:
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+ check parameters and ensure request module is installed
+ """
+ self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ source_working_environment_id=dict(required=False, type='str'),
+ destination_working_environment_id=dict(required=False, type='str'),
+ source_working_environment_name=dict(required=False, type='str'),
+ destination_working_environment_name=dict(required=False, type='str'),
+ destination_aggregate_name=dict(required=False, type='str'),
+ policy=dict(required=False, type='str', default='MirrorAllSnapshots'),
+ max_transfer_rate=dict(required=False, type='int', default='100000'),
+ schedule=dict(required=False, type='str', default='1hour'),
+ source_svm_name=dict(required=False, type='str'),
+ destination_svm_name=dict(required=False, type='str'),
+ source_volume_name=dict(required=True, type='str'),
+ destination_volume_name=dict(required=True, type='str'),
+ capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
+ provider_volume_type=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='str'),
+ client_id=dict(required=True, type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_one_of=[
+ ['source_working_environment_id', 'source_working_environment_name'],
+ ['refresh_token', 'sa_client_id'],
+ ],
+ required_together=(['source_working_environment_id', 'destination_working_environment_id'],
+ ['source_working_environment_name', 'destination_working_environment_name'],
+ ['sa_client_id', 'sa_secret_key'],
+ ),
+ supports_check_mode=True
+ )
+
+ self.na_helper = NetAppModule()
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+
+ self.rest_api = CloudManagerRestAPI(self.module)
+ self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+ self.rest_api.api_root_path = None
+ self.headers = {
+ 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+ }
+ if self.rest_api.simulator:
+ self.headers.update({'x-simulator': 'true'})
+
+ def get_snapmirror(self):
+ source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg=err)
+
+ get_url = '/occm/api/replication/status/%s' % source_we_info['publicId']
+ snapmirror_info, err, dummy = self.rest_api.send_request("GET", get_url, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error getting snapmirror relationship %s: %s.' % (err, snapmirror_info))
+ sm_found = False
+ snapmirror = None
+ for sm in snapmirror_info:
+ if sm['destination']['volumeName'] == self.parameters['destination_volume_name']:
+ sm_found = True
+ snapmirror = sm
+ break
+
+ if not sm_found:
+ return None
+ result = {
+ 'source_working_environment_id': source_we_info['publicId'],
+ 'destination_svm_name': snapmirror['destination']['svmName'],
+ 'destination_working_environment_id': dest_we_info['publicId'],
+ }
+ if not dest_we_info['publicId'].startswith('fs-'):
+ result['cloud_provider_name'] = dest_we_info['cloudProviderName']
+ return result
+
    def create_snapmirror(self):
        """
        Create the snapmirror relationship (and its destination volume) on Cloud Manager.

        Resolves both working environments, locates the source volume, quotes the
        destination volume when the destination is a cloud (non ON_PREM, non FSx)
        environment, assembles the replication request, posts it to the endpoint
        matching the destination type, and waits for the async task to finish.
        """
        snapmirror_build_data = {}
        replication_request = {}
        replication_volume = {}
        source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        if self.parameters.get('capacity_tier') is not None:
            if self.parameters['capacity_tier'] == 'NONE':
                # 'NONE' is the explicit opt-out: do not set a capacity tier at all.
                self.parameters.pop('capacity_tier')
        else:
            # No tier supplied: fall back to the destination provider's default tier.
            if dest_we_info.get('cloudProviderName'):
                self.parameters['capacity_tier'] = PROVIDER_TO_CAPACITY_TIER[dest_we_info['cloudProviderName'].lower()]

        interclusterlifs_info = self.get_interclusterlifs(source_we_info['publicId'], dest_we_info['publicId'])

        # On-prem environments use a different volumes endpoint.
        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
            source_volumes = self.get_volumes(source_we_info, self.parameters['source_volume_name'])
        else:
            source_volumes = self.get_volumes_on_prem(source_we_info, self.parameters['source_volume_name'])

        if len(source_volumes) == 0:
            self.module.fail_json(changed=False, msg='source volume not found')

        # Locate the source volume, optionally constrained to the requested source SVM.
        vol_found = False
        vol_dest_quote = {}
        source_volume_resp = {}
        for vol in source_volumes:
            if vol['name'] == self.parameters['source_volume_name']:
                vol_found = True
                vol_dest_quote = vol
                source_volume_resp = vol
                if self.parameters.get('source_svm_name') is not None and vol['svmName'] != self.parameters['source_svm_name']:
                    vol_found = False
                if vol_found:
                    break

        if not vol_found:
            self.module.fail_json(changed=False, msg='source volume not found')

        if self.parameters.get('source_svm_name') is None:
            self.parameters['source_svm_name'] = source_volume_resp['svmName']

        # Derive the destination SVM name when not supplied by the caller.
        if self.parameters.get('destination_svm_name') is None:
            if dest_we_info.get('svmName') is not None:
                self.parameters['destination_svm_name'] = dest_we_info['svmName']
            else:
                self.parameters['destination_working_environment_name'] = dest_we_info['name']
                dest_working_env_detail, err = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                      self.headers,
                                                                                                      self.parameters['destination_working_environment_name'])
                if err:
                    self.module.fail_json(changed=False, msg='Error getting destination info %s: %s.' % (err, dest_working_env_detail))
                self.parameters['destination_svm_name'] = dest_working_env_detail['svmName']

        # Cloud (non on-prem, non FSx) destinations need a volume quote first, to size
        # disks and pick an aggregate.
        if dest_we_info.get('workingEnvironmentType') and dest_we_info['workingEnvironmentType'] != 'ON_PREM'\
                and not dest_we_info['publicId'].startswith('fs-'):
            quote = self.build_quote_request(source_we_info, dest_we_info, vol_dest_quote)
            quote_response = self.quote_volume(quote)
            replication_volume['numOfDisksApprovedToAdd'] = int(quote_response['numOfDisks'])
            if 'iops' in quote:
                replication_volume['iops'] = quote['iops']
            if 'throughput' in quote:
                replication_volume['throughput'] = quote['throughput']
            # A caller-chosen aggregate switches the request into advanced mode.
            if self.parameters.get('destination_aggregate_name') is not None:
                replication_volume['advancedMode'] = True
            else:
                replication_volume['advancedMode'] = False
                replication_volume['destinationAggregateName'] = quote_response['aggregateName']
        if self.parameters.get('provider_volume_type') is None:
            replication_volume['destinationProviderVolumeType'] = source_volume_resp['providerVolumeType']

        if self.parameters.get('capacity_tier') is not None:
            replication_volume['destinationCapacityTier'] = self.parameters['capacity_tier']
        replication_request['sourceWorkingEnvironmentId'] = source_we_info['publicId']
        # FSx destinations are addressed by FSx id rather than working environment id.
        if dest_we_info['publicId'].startswith('fs-'):
            replication_request['destinationFsxId'] = dest_we_info['publicId']
        else:
            replication_request['destinationWorkingEnvironmentId'] = dest_we_info['publicId']
        replication_volume['sourceVolumeName'] = self.parameters['source_volume_name']
        replication_volume['destinationVolumeName'] = self.parameters['destination_volume_name']
        replication_request['policyName'] = self.parameters['policy']
        replication_request['scheduleName'] = self.parameters['schedule']
        replication_request['maxTransferRate'] = self.parameters['max_transfer_rate']
        replication_volume['sourceSvmName'] = source_volume_resp['svmName']
        replication_volume['destinationSvmName'] = self.parameters['destination_svm_name']
        # Peer over the first intercluster LIF on each side.
        replication_request['sourceInterclusterLifIps'] = [interclusterlifs_info['interClusterLifs'][0]['address']]
        replication_request['destinationInterclusterLifIps'] = [interclusterlifs_info['peerInterClusterLifs'][0]['address']]

        snapmirror_build_data['replicationRequest'] = replication_request
        snapmirror_build_data['replicationVolume'] = replication_volume

        # Endpoint depends on the destination type: FSx, cloud VSA, or on-prem.
        if dest_we_info['publicId'].startswith('fs-'):
            api = '/occm/api/replication/fsx'
        elif dest_we_info['workingEnvironmentType'] != 'ON_PREM':
            api = '/occm/api/replication/vsa'
        else:
            api = '/occm/api/replication/onprem'

        response, err, on_cloud_request_id = self.rest_api.send_request("POST", api, None, snapmirror_build_data, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error creating snapmirror relationship %s: %s.' % (err, response))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "snapmirror", "create", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
+
+ def get_volumes(self, working_environment_detail, name):
+ self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?workingEnvironmentId=%s&name=%s" % (
+ self.rest_api.api_root_path, working_environment_detail['publicId'], name), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error getting volume %s: %s.' % (err, response))
+ return response
+
+ def quote_volume(self, quote):
+ response, err, on_cloud_request_id = self.rest_api.send_request("POST", '%s/volumes/quote' %
+ self.rest_api.api_root_path, None, quote, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error quoting destination volume %s: %s.' % (err, response))
+ wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
+ err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "quote", 20, 5)
+ if err is not None:
+ self.module.fail_json(changed=False, msg=err)
+ return response
+
+ def get_volumes_on_prem(self, working_environment_detail, name):
+ response, err, dummy = self.rest_api.send_request("GET", "/occm/api/onprem/volumes?workingEnvironmentId=%s&name=%s" %
+ (working_environment_detail['publicId'], name), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error getting volume on prem %s: %s.' % (err, response))
+ return response
+
+    def get_aggregate_detail(self, working_environment_detail, aggregate_name):
+        """Return the aggregate named *aggregate_name* for the working environment, or None.
+
+        On-prem systems use a dedicated endpoint; cloud systems use the
+        provider-specific API root resolved by set_api_root_path().
+        """
+        if working_environment_detail['workingEnvironmentType'] == 'ON_PREM':
+            api = "/occm/api/onprem/aggregates?workingEnvironmentId=%s" % working_environment_detail['publicId']
+        else:
+            self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
+            api_root_path = self.rest_api.api_root_path
+            # NOTE(review): non-Amazon providers put the working environment id in the
+            # URL path ('/aggregates/<id>') while Amazon uses a query parameter —
+            # confirm this matches the Azure/GCP aggregates endpoint.
+            if working_environment_detail['cloudProviderName'] != "Amazon":
+                api = '%s/aggregates/%s'
+            else:
+                api = '%s/aggregates?workingEnvironmentId=%s'
+            api = api % (api_root_path, working_environment_detail['publicId'])
+        response, error, dummy = self.rest_api.get(api, header=self.headers)
+        if error:
+            self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
+        # Linear scan for the requested aggregate by name.
+        for aggr in response:
+            if aggr['name'] == aggregate_name:
+                return aggr
+        return None
+
+    def build_quote_request(self, source_we_info, dest_we_info, vol_dest_quote):
+        """Build the destination-volume quote payload for a snapmirror relationship.
+
+        Size, storage-efficiency flags and snapshot policy are mirrored from the
+        source volume (*vol_dest_quote*); iops/throughput are copied from the
+        source aggregate's provider disks when the disk type supports them.
+        """
+        quote = dict()
+        quote['size'] = {'size': vol_dest_quote['size']['size'], 'unit': vol_dest_quote['size']['unit']}
+        quote['name'] = self.parameters['destination_volume_name']
+        quote['snapshotPolicyName'] = vol_dest_quote['snapshotPolicy']
+        quote['enableDeduplication'] = vol_dest_quote['deduplication']
+        quote['enableThinProvisioning'] = vol_dest_quote['thinProvisioning']
+        quote['enableCompression'] = vol_dest_quote['compression']
+        quote['verifyNameUniqueness'] = True
+        quote['replicationFlow'] = True
+
+        # Use source working environment to get physical properties info of volumes
+        aggregate = self.get_aggregate_detail(source_we_info, vol_dest_quote['aggregateName'])
+        if aggregate is None:
+            self.module.fail_json(changed=False, msg='Error getting aggregate on source volume')
+        # All the volumes in one aggregate have the same physical properties
+        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
+            # iops applies to gp3/io1/io2 disk types; throughput only to gp3.
+            if aggregate['providerVolumes'][0]['diskType'] == 'gp3' or aggregate['providerVolumes'][0]['diskType'] == 'io1'\
+                    or aggregate['providerVolumes'][0]['diskType'] == 'io2':
+                quote['iops'] = aggregate['providerVolumes'][0]['iops']
+            if aggregate['providerVolumes'][0]['diskType'] == 'gp3':
+                quote['throughput'] = aggregate['providerVolumes'][0]['throughput']
+        quote['workingEnvironmentId'] = dest_we_info['publicId']
+        quote['svmName'] = self.parameters['destination_svm_name']
+        if self.parameters.get('capacity_tier') is not None:
+            quote['capacityTier'] = self.parameters['capacity_tier']
+
+        # Default the provider volume type to the source volume's when not supplied.
+        if self.parameters.get('provider_volume_type') is None:
+            quote['providerVolumeType'] = vol_dest_quote['providerVolumeType']
+        else:
+            quote['providerVolumeType'] = self.parameters['provider_volume_type']
+
+        return quote
+
+ def delete_snapmirror(self, sm_detail):
+ api_delete = '/occm/api/replication/%s/%s/%s' %\
+ (sm_detail['destination_working_environment_id'], sm_detail['destination_svm_name'], self.parameters['destination_volume_name'])
+ dummy, err, dummy_second = self.rest_api.send_request("DELETE", api_delete, None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error deleting snapmirror relationship %s: %s.' % (err, dummy))
+
+ def get_interclusterlifs(self, source_we_id, dest_we_id):
+ api_get = '/occm/api/replication/intercluster-lifs?peerWorkingEnvironmentId=%s&workingEnvironmentId=%s' % (dest_we_id, source_we_id)
+ response, err, dummy_second = self.rest_api.send_request("GET", api_get, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg='Error getting interclusterlifs %s: %s.' % (err, response))
+ return response
+
+    def apply(self):
+        """Apply the desired state: create or delete the snapmirror relationship.
+
+        Honors check_mode; always exits with the computed changed flag.
+        """
+        current = self.get_snapmirror()
+        cd_action = self.na_helper.get_cd_action(current, self.parameters)
+        if self.na_helper.changed and not self.module.check_mode:
+            if cd_action == 'create':
+                self.create_snapmirror()
+            elif cd_action == 'delete':
+                self.delete_snapmirror(current)
+        self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    '''Main Function'''
+    # Entry point: construct the module object and apply the requested state.
+    volume = NetAppCloudmanagerSnapmirror()
+    volume.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py
new file mode 100644
index 000000000..62c898c57
--- /dev/null
+++ b/ansible_collections/netapp/cloudmanager/plugins/modules/na_cloudmanager_volume.py
@@ -0,0 +1,660 @@
+#!/usr/bin/python
+
+# (c) 2022, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+'''
+na_cloudmanager_volume
+'''
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: na_cloudmanager_volume
+short_description: NetApp Cloud Manager volume
+extends_documentation_fragment:
+ - netapp.cloudmanager.netapp.cloudmanager
+version_added: '21.3.0'
+author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
+
+description:
+- Create, Modify or Delete volume on Cloud Manager.
+
+options:
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ name:
+ description:
+ - The name of the volume.
+ required: true
+ type: str
+
+ working_environment_name:
+ description:
+ - The working environment name where the volume will be created.
+ type: str
+
+ working_environment_id:
+ description:
+ - The public ID of the working environment where the volume will be created.
+ type: str
+
+ client_id:
+ description:
+ - The connector ID of the Cloud Manager Connector.
+ required: true
+ type: str
+
+ size:
+ description:
+ - The size of the volume.
+ type: float
+
+ size_unit:
+ description:
+ - The size unit of volume.
+ choices: ['GB']
+ default: 'GB'
+ type: str
+
+ snapshot_policy_name:
+ description:
+ - The snapshot policy name.
+ type: str
+
+ provider_volume_type:
+ description:
+ - The underlying cloud provider volume type.
+ - For AWS is ["gp3", "gp2", "io1", "st1", "sc1"].
+ - For Azure is ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
+ - For GCP is ['pd-balanced','pd-ssd','pd-standard'].
+ type: str
+
+ enable_deduplication:
+ description:
+ - Enabling deduplication.
+ - Default to true if not specified.
+ type: bool
+
+ enable_compression:
+ description:
+    - Enabling compression.
+ - Default to true if not specified.
+ type: bool
+
+ enable_thin_provisioning:
+ description:
+ - Enabling thin provisioning.
+ - Default to true if not specified.
+ type: bool
+
+ svm_name:
+ description:
+ - The name of the SVM. The default SVM name is used, if a name is not provided.
+ type: str
+
+ aggregate_name:
+ description:
+ - The aggregate in which the volume will be created. If not provided, Cloud Manager chooses the best aggregate.
+ type: str
+
+ capacity_tier:
+ description:
+ - The volume's capacity tier for tiering cold data to object storage.
+ - The default values for each cloud provider are as follows. Amazon as 'S3', Azure as 'Blob', GCP as 'cloudStorage'.
+ - If 'NONE', the capacity tier will not be set on volume creation.
+ choices: ['NONE', 'S3', 'Blob', 'cloudStorage']
+ type: str
+
+ tiering_policy:
+ description:
+ - The tiering policy.
+ choices: ['none', 'snapshot_only', 'auto', 'all']
+ type: str
+
+ export_policy_type:
+ description:
+ - The export policy type (NFS protocol parameters).
+ type: str
+
+ export_policy_ip:
+ description:
+ - Custom export policy list of IPs (NFS protocol parameters).
+ type: list
+ elements: str
+
+ export_policy_nfs_version:
+ description:
+ - Export policy protocol (NFS protocol parameters).
+ type: list
+ elements: str
+
+ iops:
+ description:
+ - Provisioned IOPS. Needed only when provider_volume_type is "io1".
+ type: int
+
+ throughput:
+ description:
+ - Unit is Mb/s. Valid range 125-1000.
+ - Required only when provider_volume_type is 'gp3'.
+ type: int
+
+ volume_protocol:
+ description:
+ - The protocol for the volume. This affects the provided parameters.
+ choices: ['nfs', 'cifs', 'iscsi']
+ type: str
+ default: 'nfs'
+
+ share_name:
+ description:
+ - Share name (CIFS protocol parameters).
+ type: str
+
+ permission:
+ description:
+ - CIFS share permission type (CIFS protocol parameters).
+ type: str
+
+ users:
+ description:
+ - List of users with the permission (CIFS protocol parameters).
+ type: list
+ elements: str
+
+ igroups:
+ description:
+ - List of igroups (iSCSI protocol parameters).
+ type: list
+ elements: str
+
+ os_name:
+ description:
+ - Operating system (iSCSI protocol parameters).
+ type: str
+
+ tenant_id:
+ description:
+ - The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
+ type: str
+ version_added: 21.20.0
+
+ initiators:
+ description:
+ - Set of attributes of Initiators (iSCSI protocol parameters).
+ type: list
+ elements: dict
+ suboptions:
+ iqn:
+ description: The initiator node name.
+ required: true
+ type: str
+ alias:
+ description: The alias which associates with the node.
+ required: true
+ type: str
+
+notes:
+- Support check_mode.
+'''
+
+EXAMPLES = '''
+- name: Create nfs volume with working_environment_name
+ netapp.cloudmanager.na_cloudmanager_volume:
+ state: present
+ name: test_vol
+ size: 15
+ size_unit: GB
+ working_environment_name: working_environment_1
+ client_id: client_id
+ refresh_token: refresh_token
+ svm_name: svm_1
+ snapshot_policy_name: default
+ export_policy_type: custom
+ export_policy_ip: ["10.0.0.1/16"]
+ export_policy_nfs_version: ["nfs3","nfs4"]
+
+- name: Delete volume
+ netapp.cloudmanager.na_cloudmanager_volume:
+ state: absent
+ name: test_vol
+ working_environment_name: working_environment_1
+ client_id: client_id
+ refresh_token: refresh_token
+ svm_name: svm_1
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
+from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
+
+
+class NetAppCloudmanagerVolume(object):
+
+ def __init__(self):
+ """
+ Parse arguments, setup state variables,
+ check parameters and ensure request module is installed
+ """
+ self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ working_environment_id=dict(required=False, type='str'),
+ working_environment_name=dict(required=False, type='str'),
+ client_id=dict(required=True, type='str'),
+ size=dict(required=False, type='float'),
+ size_unit=dict(required=False, choices=['GB'], default='GB'),
+ snapshot_policy_name=dict(required=False, type='str'),
+ provider_volume_type=dict(required=False, type='str'),
+ enable_deduplication=dict(required=False, type='bool'),
+ enable_thin_provisioning=dict(required=False, type='bool'),
+ enable_compression=dict(required=False, type='bool'),
+ svm_name=dict(required=False, type='str'),
+ aggregate_name=dict(required=False, type='str'),
+ capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
+ tiering_policy=dict(required=False, type='str', choices=['none', 'snapshot_only', 'auto', 'all']),
+ export_policy_type=dict(required=False, type='str'),
+ export_policy_ip=dict(required=False, type='list', elements='str'),
+ export_policy_nfs_version=dict(required=False, type='list', elements='str'),
+ iops=dict(required=False, type='int'),
+ throughput=dict(required=False, type='int'),
+ volume_protocol=dict(required=False, type='str', choices=['nfs', 'cifs', 'iscsi'], default='nfs'),
+ share_name=dict(required=False, type='str'),
+ permission=dict(required=False, type='str'),
+ users=dict(required=False, type='list', elements='str'),
+ igroups=dict(required=False, type='list', elements='str'),
+ os_name=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='str'),
+ initiators=dict(required=False, type='list', elements='dict', options=dict(
+ alias=dict(required=True, type='str'),
+ iqn=dict(required=True, type='str'),)),
+
+ ))
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_one_of=[
+ ['refresh_token', 'sa_client_id'],
+ ['working_environment_name', 'working_environment_id'],
+ ],
+ required_together=[['sa_client_id', 'sa_secret_key']],
+ required_if=[
+ ['provider_volume_type', 'gp3', ['iops', 'throughput']],
+ ['provider_volume_type', 'io1', ['iops']],
+ ['capacity_tier', 'S3', ['tiering_policy']],
+ ],
+ # enable_thin_provisioning reflects storage efficiency.
+ required_by={
+ 'capacity_tier': ('tiering_policy', 'enable_thin_provisioning'),
+ },
+ supports_check_mode=True
+ )
+ self.na_helper = NetAppModule()
+ # set up state variables
+ self.parameters = self.na_helper.set_parameters(self.module.params)
+ # Calling generic rest_api class
+ self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
+ self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
+ self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
+ self.headers = {
+ 'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
+ }
+ if self.rest_api.simulator:
+ self.headers.update({'x-simulator': 'true'})
+ if self.parameters.get('tenant_id'):
+ working_environment_detail, error = self.na_helper.get_aws_fsx_details(self.rest_api, self.headers, self.parameters['working_environment_name'])
+ elif self.parameters.get('working_environment_id'):
+ working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
+ else:
+ working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
+ self.headers,
+ self.parameters['working_environment_name'])
+ if working_environment_detail is None:
+ self.module.fail_json(msg="Error: Cannot find working environment, if it is an AWS FSxN, please provide tenant_id: %s" % str(error))
+ self.parameters['working_environment_id'] = working_environment_detail['publicId']\
+ if working_environment_detail.get('publicId') else working_environment_detail['id']
+ self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
+ self.is_fsx = self.parameters['working_environment_id'].startswith('fs-')
+
+ if self.parameters.get('svm_name') is None:
+ fsx_path = ''
+ if self.is_fsx:
+ fsx_path = '/svms'
+ response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s%s" % (
+ self.rest_api.api_root_path, self.parameters['working_environment_id'], fsx_path), None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting svm: %s, %s" % (str(err), str(response)))
+ if self.is_fsx:
+ self.parameters['svm_name'] = response[0]['name']
+ else:
+ self.parameters['svm_name'] = response['svmName']
+
+ if self.parameters['volume_protocol'] == 'nfs':
+ extra_options = []
+ for option in ['share_name', 'permission', 'users', 'igroups', 'os_name', 'initiator']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is nfs: "
+ " %s" % extra_options)
+ elif self.parameters['volume_protocol'] == 'cifs':
+ extra_options = []
+ for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'igroups', 'os_name', 'initiator']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is cifs: "
+ "%s" % extra_options)
+ else:
+ extra_options = []
+ for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'share_name', 'permission', 'users']:
+ if self.parameters.get(option) is not None:
+ extra_options.append(option)
+ if len(extra_options) > 0:
+ self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is iscsi: "
+ "%s" % extra_options)
+
+ if self.parameters.get('igroups'):
+ current_igroups = []
+ for igroup in self.parameters['igroups']:
+ current = self.get_igroup(igroup)
+ current_igroups.append(current)
+ if any(isinstance(x, dict) for x in current_igroups) and None in current_igroups:
+ self.module.fail_json(changed=False, msg="Error: can not specify existing"
+ "igroup and new igroup together.")
+ if len(current_igroups) > 1 and None in current_igroups:
+ self.module.fail_json(changed=False, msg="Error: can not create more than one igroups.")
+ if current_igroups[0] is None:
+ if self.parameters.get('initiators') is None:
+ self.module.fail_json(changed=False, msg="Error: initiator is required when creating new igroup.")
+
+ if self.parameters.get('users'):
+ # When creating volume, 'Everyone' must have upper case E, 'everyone' will not work.
+ # When modifying volume, 'everyone' is fine.
+ new_users = []
+ for user in self.parameters['users']:
+ if user.lower() == 'everyone':
+ new_users.append('Everyone')
+ else:
+ new_users.append(user)
+ self.parameters['users'] = new_users
+
+ def get_volume(self):
+ if self.is_fsx:
+ query_param = 'fileSystemId'
+ else:
+ query_param = 'workingEnvironmentId'
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?%s=%s" % (
+ self.rest_api.api_root_path, query_param, self.parameters['working_environment_id']), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting volume: %s, %s" % (str(err), str(response)))
+ target_vol = dict()
+ if response is None:
+ return None
+ for volume in response:
+ if volume['name'] == self.parameters['name']:
+ target_vol['name'] = volume['name']
+ target_vol['enable_deduplication'] = volume['deduplication']
+ target_vol['enable_thin_provisioning'] = volume['thinProvisioning']
+ target_vol['enable_compression'] = volume['compression']
+ if self.parameters.get('size'):
+ target_vol['size'] = volume['size']['size']
+ if self.parameters.get('size_unit'):
+ target_vol['size_unit'] = volume['size']['unit']
+ if self.parameters.get('export_policy_nfs_version') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_nfs_version'] = volume['exportPolicyInfo']['nfsVersion']
+ if self.parameters.get('export_policy_ip') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_ip'] = volume['exportPolicyInfo']['ips']
+ if self.parameters.get('export_policy_type') and volume.get('exportPolicyInfo'):
+ target_vol['export_policy_type'] = volume['exportPolicyInfo']['policyType']
+ if self.parameters.get('snapshot_policy'):
+ target_vol['snapshot_policy'] = volume['snapshotPolicy']
+ if self.parameters.get('provider_volume_type'):
+ target_vol['provider_volume_type'] = volume['providerVolumeType']
+ if self.parameters.get('capacity_tier') and self.parameters.get('capacity_tier') != 'NONE':
+ target_vol['capacity_tier'] = volume['capacityTier']
+ if self.parameters.get('tiering_policy'):
+ target_vol['tiering_policy'] = volume['tieringPolicy']
+ if self.parameters.get('share_name') and volume.get('shareInfo'):
+ target_vol['share_name'] = volume['shareInfo'][0]['shareName']
+ if self.parameters.get('users') and volume.get('shareInfo'):
+ if len(volume['shareInfo'][0]['accessControlList']) > 0:
+ target_vol['users'] = volume['shareInfo'][0]['accessControlList'][0]['users']
+ else:
+ target_vol['users'] = []
+ if self.parameters.get('users') and volume.get('shareInfo'):
+ if len(volume['shareInfo'][0]['accessControlList']) > 0:
+ target_vol['permission'] = volume['shareInfo'][0]['accessControlList'][0]['permission']
+ else:
+ target_vol['permission'] = []
+ if self.parameters.get('os_name') and volume.get('iscsiInfo'):
+ target_vol['os_name'] = volume['iscsiInfo']['osName']
+ if self.parameters.get('igroups') and volume.get('iscsiInfo'):
+ target_vol['igroups'] = volume['iscsiInfo']['igroups']
+ return target_vol
+ return None
+
+    def create_volume(self):
+        """Create the volume, quoting it first on non-FSx systems to size the aggregate.
+
+        The request body is built from the module parameters (camelCase via
+        convert_module_args_to_api), then protocol-specific sections are added,
+        and the creation task is awaited.
+        """
+        exclude_list = ['client_id', 'size_unit', 'export_policy_name', 'export_policy_type', 'export_policy_ip',
+                        'export_policy_nfs_version', 'capacity_tier']
+        quote = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
+        quote['verifyNameUniqueness'] = True  # Always hard coded to true.
+        quote['unit'] = self.parameters['size_unit']
+        quote['size'] = {'size': self.parameters['size'], 'unit': self.parameters['size_unit']}
+        # Let Cloud Manager create an aggregate only when none was named explicitly.
+        create_aggregate_if_not_exists = True
+        if self.parameters.get('aggregate_name'):
+            quote['aggregateName'] = self.parameters['aggregate_name']
+            create_aggregate_if_not_exists = False
+
+        if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
+            quote['capacityTier'] = self.parameters['capacity_tier']
+
+        # Protocol-specific request sections.
+        if self.parameters['volume_protocol'] == 'nfs':
+            quote['exportPolicyInfo'] = dict()
+            if self.parameters.get('export_policy_type'):
+                quote['exportPolicyInfo']['policyType'] = self.parameters['export_policy_type']
+            if self.parameters.get('export_policy_ip'):
+                quote['exportPolicyInfo']['ips'] = self.parameters['export_policy_ip']
+            if self.parameters.get('export_policy_nfs_version'):
+                quote['exportPolicyInfo']['nfsVersion'] = self.parameters['export_policy_nfs_version']
+        elif self.parameters['volume_protocol'] == 'iscsi':
+            iscsi_info = self.iscsi_volume_helper()
+            quote.update(iscsi_info)
+        else:
+            quote['shareInfo'] = dict()
+            quote['shareInfo']['accessControl'] = dict()
+            quote['shareInfo']['accessControl']['users'] = self.parameters['users']
+            if self.parameters.get('permission'):
+                quote['shareInfo']['accessControl']['permission'] = self.parameters['permission']
+            if self.parameters.get('share_name'):
+                quote['shareInfo']['shareName'] = self.parameters['share_name']
+        if not self.is_fsx:
+            # Quote first so Cloud Manager can pick/size the aggregate for the volume.
+            response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/quote" % self.rest_api.api_root_path,
+                                                              None, quote, header=self.headers)
+            if err is not None:
+                self.module.fail_json(changed=False, msg="Error: unexpected response on quoting volume: %s, %s" % (str(err), str(response)))
+            quote['newAggregate'] = response['newAggregate']
+            quote['aggregateName'] = response['aggregateName']
+            quote['maxNumOfDisksApprovedToAdd'] = response['numOfDisks']
+        else:
+            # FSx: no quote step; the FSx API expects flat field names.
+            quote['fileSystemId'] = self.parameters['working_environment_id']
+            if self.parameters.get('enable_deduplication'):
+                quote['deduplication'] = self.parameters.get('enable_deduplication')
+            if self.parameters.get('enable_thin_provisioning'):
+                quote['thinProvisioning'] = self.parameters.get('enable_thin_provisioning')
+            if self.parameters.get('enable_compression'):
+                quote['compression'] = self.parameters.get('enable_compression')
+            if self.parameters.get('snapshot_policy_name'):
+                quote['snapshotPolicy'] = self.parameters['snapshot_policy_name']
+            if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
+                quote['capacityTier'] = self.parameters['capacity_tier']
+            if self.parameters.get('tiering_policy'):
+                quote['tieringPolicy'] = self.parameters['tiering_policy']
+            if self.parameters.get('provider_volume_type'):
+                quote['providerVolumeType'] = self.parameters['provider_volume_type']
+            if self.parameters.get('iops'):
+                quote['iops'] = self.parameters.get('iops')
+            if self.parameters.get('throughput'):
+                quote['throughput'] = self.parameters.get('throughput')
+        response, err, on_cloud_request_id = self.rest_api.send_request("POST", "%s/volumes?createAggregateIfNotFound=%s" % (
+            self.rest_api.api_root_path, create_aggregate_if_not_exists), None, quote, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error: unexpected on creating volume: %s, %s" % (str(err), str(response)))
+        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
+        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "create", 20, 5)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error: unexpected response wait_on_completion for creating volume: %s, %s" % (str(err), str(response)))
+
+    def modify_volume(self, modify):
+        """Apply the attribute changes in *modify* to the existing volume.
+
+        Protocol-specific payloads: NFS sends exportPolicyInfo, CIFS sends
+        shareInfo; snapshot policy and tiering policy are sent only when changed.
+        """
+        vol = dict()
+        if self.parameters['volume_protocol'] == 'nfs':
+            export_policy_info = dict()
+            if self.parameters.get('export_policy_type'):
+                export_policy_info['policyType'] = self.parameters['export_policy_type']
+            if self.parameters.get('export_policy_ip'):
+                export_policy_info['ips'] = self.parameters['export_policy_ip']
+            if self.parameters.get('export_policy_nfs_version'):
+                export_policy_info['nfsVersion'] = self.parameters['export_policy_nfs_version']
+            vol['exportPolicyInfo'] = export_policy_info
+        elif self.parameters['volume_protocol'] == 'cifs':
+            vol['shareInfo'] = dict()
+            vol['shareInfo']['accessControlList'] = []
+            vol['shareInfo']['accessControlList'].append(dict())
+            if self.parameters.get('users'):
+                vol['shareInfo']['accessControlList'][0]['users'] = self.parameters['users']
+            if self.parameters.get('permission'):
+                vol['shareInfo']['accessControlList'][0]['permission'] = self.parameters['permission']
+            if self.parameters.get('share_name'):
+                vol['shareInfo']['shareName'] = self.parameters['share_name']
+        if modify.get('snapshot_policy_name'):
+            vol['snapshotPolicyName'] = self.parameters.get('snapshot_policy_name')
+        if modify.get('tiering_policy'):
+            vol['tieringPolicy'] = self.parameters.get('tiering_policy')
+        response, err, dummy = self.rest_api.send_request("PUT", "%s/volumes/%s/%s/%s" % (
+            self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
+            self.parameters['name']), None, vol, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error: unexpected response on modifying volume: %s, %s" % (str(err), str(response)))
+
+ def delete_volume(self):
+ response, err, dummy = self.rest_api.send_request("DELETE", "%s/volumes/%s/%s/%s" % (
+ self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
+ self.parameters['name']), None, None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on deleting volume: %s, %s" % (str(err), str(response)))
+
+ def get_initiator(self, alias_name):
+ response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/initiator" % (
+ self.rest_api.api_root_path), None, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on getting initiator: %s, %s" % (str(err), str(response)))
+ result = dict()
+ if response is None:
+ return None
+ for initiator in response:
+ if initiator.get('aliasName') and initiator.get('aliasName') == alias_name:
+ result['alias'] = initiator.get('aliasName')
+ result['iqn'] = initiator.get('iqn')
+ return result
+ return None
+
+ def create_initiator(self, initiator):
+ ini = self.na_helper.convert_module_args_to_api(initiator)
+ response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/initiator" % (
+ self.rest_api.api_root_path), None, ini, header=self.headers)
+ if err is not None:
+ self.module.fail_json(changed=False, msg="Error: unexpected response on creating initiator: %s, %s" % (str(err), str(response)))
+
+    def get_igroup(self, igroup_name):
+        """Return the igroup named *igroup_name* as a dict of module-style keys, or None."""
+        response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/igroups/%s/%s" % (
+            self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name']),
+            None, None, header=self.headers)
+        if err is not None:
+            self.module.fail_json(changed=False, msg="Error: unexpected response on getting igroup: %s, %s" % (str(err), str(response)))
+        result = dict()
+        if response is None:
+            return None
+        # Translate the API's camelCase fields into the module's snake_case keys.
+        for igroup in response:
+            if igroup['igroupName'] == igroup_name:
+                result['igroup_name'] = igroup['igroupName']
+                result['os_type'] = igroup['osType']
+                result['portset_name'] = igroup['portsetName']
+                result['igroup_type'] = igroup['igroupType']
+                result['initiators'] = igroup['initiators']
+                return result
+        return None
+
+    def iscsi_volume_helper(self):
+        """Build the iscsiInfo section of a volume creation request.
+
+        Uses the existing igroups when all requested igroups exist; otherwise
+        builds an igroupCreationRequest (validation in __init__ guarantees at
+        most one new igroup and that 'initiators' is provided), creating any
+        initiators that are not yet registered.
+        """
+        quote = dict()
+        quote['iscsiInfo'] = dict()
+        if self.parameters.get('igroups'):
+            current_igroups = []
+            for igroup in self.parameters['igroups']:
+                current = self.get_igroup(igroup)
+                current_igroups.append(current)
+            for igroup in current_igroups:
+                if igroup is None:
+                    # New igroup: collect the iqns and register missing initiators.
+                    quote['iscsiInfo']['igroupCreationRequest'] = dict()
+                    quote['iscsiInfo']['igroupCreationRequest']['igroupName'] = self.parameters['igroups'][0]
+                    iqn_list = []
+                    for initiator in self.parameters['initiators']:
+                        if initiator.get('iqn'):
+                            iqn_list.append(initiator['iqn'])
+                        current_initiator = self.get_initiator(initiator['alias'])
+                        if current_initiator is None:
+                            initiator_request = dict()
+                            if initiator.get('alias'):
+                                initiator_request['aliasName'] = initiator['alias']
+                            if initiator.get('iqn'):
+                                initiator_request['iqn'] = initiator['iqn']
+                            self.create_initiator(initiator_request)
+                    quote['iscsiInfo']['igroupCreationRequest']['initiators'] = iqn_list
+                    quote['iscsiInfo']['osName'] = self.parameters['os_name']
+
+                else:
+                    # All igroups exist: reference them by name.
+                    quote['iscsiInfo']['igroups'] = self.parameters['igroups']
+                    quote['iscsiInfo']['osName'] = self.parameters['os_name']
+        return quote
+
+ def apply(self):
+ current = self.get_volume()
+ cd_action, modify = None, None
+ cd_action = self.na_helper.get_cd_action(current, self.parameters)
+ if cd_action is None:
+ modify = self.na_helper.get_modified_attributes(current, self.parameters)
+ unmodifiable = []
+ for attr in modify:
+ if attr not in ['export_policy_ip', 'export_policy_nfs_version', 'snapshot_policy_name', 'users',
+ 'permission', 'tiering_policy', 'snapshot_policy_name']:
+ unmodifiable.append(attr)
+ if len(unmodifiable) > 0:
+ self.module.fail_json(changed=False, msg="%s cannot be modified." % str(unmodifiable))
+ if self.na_helper.changed and not self.module.check_mode:
+ if cd_action == 'create':
+ self.create_volume()
+ elif cd_action == 'delete':
+ self.delete_volume()
+ elif modify:
+ self.modify_volume(modify)
+ self.module.exit_json(changed=self.na_helper.changed)
+
+
+def main():
+    '''Main Function'''
+    # Entry point: construct the module object and apply the requested state.
+    volume = NetAppCloudmanagerVolume()
+    volume.apply()
+
+
+if __name__ == '__main__':
+    main()