summaryrefslogtreecommitdiffstats
path: root/src/pybind/mgr/ansible
diff options
context:
space:
mode:
Diffstat (limited to 'src/pybind/mgr/ansible')
-rw-r--r--src/pybind/mgr/ansible/.gitignore1
-rw-r--r--src/pybind/mgr/ansible/CMakeLists.txt7
-rw-r--r--src/pybind/mgr/ansible/__init__.py9
-rw-r--r--src/pybind/mgr/ansible/ansible_runner_svc.py326
-rw-r--r--src/pybind/mgr/ansible/module.py729
-rw-r--r--src/pybind/mgr/ansible/output_wizards.py158
-rw-r--r--src/pybind/mgr/ansible/requirements.txt2
-rw-r--r--src/pybind/mgr/ansible/run-tox.sh43
-rw-r--r--src/pybind/mgr/ansible/tests/__init__.py0
-rw-r--r--src/pybind/mgr/ansible/tests/pb_execution_events.data183
-rw-r--r--src/pybind/mgr/ansible/tests/test_client_playbooks.py287
-rw-r--r--src/pybind/mgr/ansible/tests/test_output_wizards.py207
-rw-r--r--src/pybind/mgr/ansible/tox.ini18
13 files changed, 1970 insertions, 0 deletions
diff --git a/src/pybind/mgr/ansible/.gitignore b/src/pybind/mgr/ansible/.gitignore
new file mode 100644
index 00000000..43a2d269
--- /dev/null
+++ b/src/pybind/mgr/ansible/.gitignore
@@ -0,0 +1 @@
+wheelhouse*
diff --git a/src/pybind/mgr/ansible/CMakeLists.txt b/src/pybind/mgr/ansible/CMakeLists.txt
new file mode 100644
index 00000000..c706aa8c
--- /dev/null
+++ b/src/pybind/mgr/ansible/CMakeLists.txt
@@ -0,0 +1,7 @@
# Virtualenv used to run the ansible mgr-module unit tests
set(MGR_ANSIBLE_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-ansible-virtualenv)

# Target that creates the virtualenv with the same interpreter used by ceph-mgr
add_custom_target(mgr-ansible-test-venv
  COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${MGR_ANSIBLE_VIRTUALENV}
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/pybind/mgr/ansible
  COMMENT "ansible tests virtualenv is being created")
# Make the global "tests" target build this virtualenv as well
add_dependencies(tests mgr-ansible-test-venv)
diff --git a/src/pybind/mgr/ansible/__init__.py b/src/pybind/mgr/ansible/__init__.py
new file mode 100644
index 00000000..ea61a12f
--- /dev/null
+++ b/src/pybind/mgr/ansible/__init__.py
@@ -0,0 +1,9 @@
from __future__ import absolute_import
import os

# When running the unit tests (UNITTEST set in the environment) the real
# ceph_module C extension is not available, so it is replaced with a mock
# before any module import can fail on it. In normal operation the module
# entry point (Module) is exported directly.
if 'UNITTEST' not in os.environ:
    from .module import Module
else:
    import sys
    import mock
    sys.modules['ceph_module'] = mock.Mock()
diff --git a/src/pybind/mgr/ansible/ansible_runner_svc.py b/src/pybind/mgr/ansible/ansible_runner_svc.py
new file mode 100644
index 00000000..636e20b0
--- /dev/null
+++ b/src/pybind/mgr/ansible/ansible_runner_svc.py
@@ -0,0 +1,326 @@
+"""
+Client module to interact with the Ansible Runner Service
+"""
+import requests
+import json
+import re
+from functools import wraps
+
+# Ansible Runner service API endpoints
+API_URL = "api"
+LOGIN_URL = "api/v1/login"
+PLAYBOOK_EXEC_URL = "api/v1/playbooks"
+PLAYBOOK_EVENTS = "api/v1/jobs/%s/events"
+EVENT_DATA_URL = "api/v1/jobs/%s/events/%s"
+
class AnsibleRunnerServiceError(Exception):
    """Generic error raised when interacting with the Ansible Runner Service."""
    pass
+
def handle_requests_exceptions(func):
    """Decorator that converts errors raised by the requests library into
    AnsibleRunnerServiceError exceptions.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except requests.exceptions.RequestException as ex:
            raise AnsibleRunnerServiceError(str(ex))
        return result
    return wrapper
+
class ExecutionStatusCode(object):
    """Execution status of a playbook, derived from the 'msg' field of the
    playbook status response returned by the Ansible Runner Service.
    """

    SUCCESS = 0       # Playbook executed successfully (msg = successful)
    ERROR = 1         # Playbook finished with errors  (msg = failed)
    ON_GOING = 2      # Playbook is being executed     (msg = running)
    NOT_LAUNCHED = 3  # Not initialized / not launched yet
+
class PlayBookExecution(object):
    """Handle one playbook execution in the Ansible Runner Service:
    launch the playbook and expose its status and result events.
    """

    def __init__(self, rest_client, playbook, logger, result_pattern="",
                 the_params=None,
                 querystr_dict=None):
        """
        :param rest_client: Ansible Runner Service client (Client object)
        :param playbook: Name of the playbook to execute
        :param logger: Log object
        :param result_pattern: Regex matched against the "task" field of the
            execution events to select the ones that hold the result
        :param the_params: Payload (dict) sent in the launch request
        :param querystr_dict: Query string (dict) used in the launch request
        """

        self.rest_client = rest_client

        # Identifier of the playbook execution.
        # "-" means not launched yet; "" means launch failed.
        self.play_uuid = "-"

        # Pattern used to extract the result from the events
        self.result_task_pattern = result_pattern

        # Playbook name
        self.playbook = playbook

        # Parameters used in the playbook
        self.params = the_params

        # Query string used in the "launch" request
        self.querystr_dict = querystr_dict

        # Logger
        self.log = logger

    def launch(self):
        """Launch the playbook execution and store its uuid.

        :raises AnsibleRunnerServiceError: if the service cannot be reached
            or refuses to launch the playbook
        """

        response = None
        endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.playbook)

        try:
            response = self.rest_client.http_post(endpoint,
                                                  self.params,
                                                  self.querystr_dict)
        except AnsibleRunnerServiceError:
            self.log.exception("Error launching playbook <%s>", self.playbook)
            raise

        # Here we have a server response, but an error trying
        # to launch the playbook is also possible (ex. 404, playbook not
        # found). The error is already logged by rest_client, but it must
        # also be raised to the orchestrator (via the completion object).
        if response.ok:
            self.play_uuid = json.loads(response.text)["data"]["play_uuid"]
            self.log.info("Playbook execution launched succesfuly")
        else:
            raise AnsibleRunnerServiceError(response.reason)

    def get_status(self):
        """Return the status of the execution as an ExecutionStatusCode.

        In the "msg" field of the response we can find:
            "msg": "successful"
            "msg": "running"
            "msg": "failed"
        """

        status_value = ExecutionStatusCode.NOT_LAUNCHED
        response = None

        if self.play_uuid == '-':  # Initialized but never launched
            status_value = ExecutionStatusCode.NOT_LAUNCHED
        elif self.play_uuid == '':  # Error launching playbook
            status_value = ExecutionStatusCode.ERROR
        else:
            endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.play_uuid)

            try:
                response = self.rest_client.http_get(endpoint)
            except AnsibleRunnerServiceError:
                self.log.exception("Error getting playbook <%s> status",
                                   self.playbook)

            # No (usable) response means the status is unknown: report ERROR
            if response:
                the_status = json.loads(response.text)["msg"]
                if the_status == 'successful':
                    status_value = ExecutionStatusCode.SUCCESS
                elif the_status == 'failed':
                    status_value = ExecutionStatusCode.ERROR
                else:
                    status_value = ExecutionStatusCode.ON_GOING
            else:
                status_value = ExecutionStatusCode.ERROR

        self.log.info("Requested playbook execution status is: %s", status_value)
        return status_value

    def get_result(self, event_filter=""):
        """Get the data of the events filtered by the task pattern and
        an event filter.

        :param event_filter: Regex matched against the "event" field
        :returns: the events that match the patterns provided
        """
        response = None
        if not self.play_uuid:
            return {}

        try:
            response = self.rest_client.http_get(PLAYBOOK_EVENTS % self.play_uuid)
        except AnsibleRunnerServiceError:
            self.log.exception("Error getting playbook <%s> result", self.playbook)

        if not response:
            result_events = {}
        else:
            events = json.loads(response.text)["data"]["events"]

            # First filter: keep only events whose "task" matches the pattern
            if self.result_task_pattern:
                result_events = {event:data for event,data in events.items()
                                 if "task" in data and
                                 re.match(self.result_task_pattern, data["task"])}
            else:
                result_events = events

            # Second filter: restrict by the "event" field
            if event_filter:
                result_events = {event:data for event,data in result_events.items()
                                 if re.match(event_filter, data['event'])}

        self.log.info("Requested playbook result is: %s", json.dumps(result_events))
        return result_events
+
class Client(object):
    """A utility object that connects with the Ansible Runner Service and
    makes it easy to execute playbooks.
    """

    def __init__(self, server_url, user, password, verify_server, logger):
        """Provide an https client to interact with the Ansible Runner Service.

        :param server_url: The base URL <server>:<port> of the Ansible Runner Service
        :param user: Username of the authorized user
        :param password: Password of the authorized user
        :param verify_server: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use (the string 'false' disables verification).
            Defaults to ``True``.
        :param logger: Log object

        :raises AnsibleRunnerServiceError: if the login request fails at
            transport level
        """
        self.server_url = server_url
        self.user = user
        self.password = password
        self.log = logger
        self.auth = (self.user, self.password)

        # Normalize <verify_server> into what the requests library expects.
        # BUG FIX: the original code applied .lower() to any truthy value,
        # which raised AttributeError when a boolean True was passed (the
        # documented default coming from the module option).
        if isinstance(verify_server, bool):
            # Explicit boolean: use it as-is
            self.verify_server = verify_server
        elif not verify_server:
            # Unset/empty option: verify the server identity by default
            self.verify_server = True
        elif str(verify_server).lower().strip() == 'false':
            self.verify_server = False
        else:
            # Assume it is a path to a CA bundle used for verification
            self.verify_server = verify_server

        # Once authenticated this token will be used in all the requests
        self.token = ""

        self.server_url = "https://{0}".format(self.server_url)

        # Log in the server and get a token
        self.login()

    @handle_requests_exceptions
    def login(self):
        """Login with user credentials to obtain a valid token.

        On success the token is stored in self.token; on an http error the
        problem is only logged (self.token stays empty).
        """

        the_url = "%s/%s" % (self.server_url, LOGIN_URL)
        response = requests.get(the_url,
                                auth = self.auth,
                                verify = self.verify_server)

        if response.status_code != requests.codes.ok:
            self.log.error("login error <<%s>> (%s):%s",
                           the_url, response.status_code, response.text)
        else:
            self.log.info("login succesful <<%s>> (%s):%s",
                          the_url, response.status_code, response.text)

        if response:
            self.token = json.loads(response.text)["data"]["token"]
            self.log.info("Connection with Ansible Runner Service is operative")

    @handle_requests_exceptions
    def is_operative(self):
        """Indicates if the connection with the Ansible Runner Server is ok.

        :returns: True when already logged in and the API answers OK
        """

        # No Token... this means we haven't used yet the service.
        if not self.token:
            return False

        # Check the service
        response = self.http_get(API_URL)

        if response:
            return response.status_code == requests.codes.ok
        else:
            return False

    @handle_requests_exceptions
    def http_get(self, endpoint):
        """Execute an http get request.

        :param endpoint: Ansible Runner service RESTful API endpoint

        :returns: A requests response object

        :raises AnsibleRunnerServiceError: on transport-level errors
        """

        the_url = "%s/%s" % (self.server_url, endpoint)
        response = requests.get(the_url,
                                verify = self.verify_server,
                                headers = {"Authorization": self.token})

        if response.status_code != requests.codes.ok:
            self.log.error("http GET %s <--> (%s - %s)\n%s",
                           the_url, response.status_code, response.reason,
                           response.text)
        else:
            self.log.info("http GET %s <--> (%s - %s)",
                          the_url, response.status_code, response.text)

        return response

    @handle_requests_exceptions
    def http_post(self, endpoint, payload, params_dict):
        """Execute an http post request.

        :param endpoint: Ansible Runner service RESTful API endpoint
        :param payload: Dictionary with the data used in the post request
        :param params_dict: A dict used to build a query string

        :returns: A requests response object

        :raises AnsibleRunnerServiceError: on transport-level errors
        """

        the_url = "%s/%s" % (self.server_url, endpoint)
        response = requests.post(the_url,
                                 verify = self.verify_server,
                                 headers = {"Authorization": self.token,
                                            "Content-type": "application/json"},
                                 json = payload,
                                 params = params_dict)

        if response.status_code != requests.codes.ok:
            self.log.error("http POST %s [%s] <--> (%s - %s:%s)\n",
                           the_url, payload, response.status_code,
                           response.reason, response.text)
        else:
            self.log.info("http POST %s <--> (%s - %s)",
                          the_url, response.status_code, response.text)

        return response

    @handle_requests_exceptions
    def http_delete(self, endpoint):
        """Execute an http delete request.

        :param endpoint: Ansible Runner service RESTful API endpoint

        :returns: A requests response object

        :raises AnsibleRunnerServiceError: on transport-level errors
        """

        the_url = "%s/%s" % (self.server_url, endpoint)
        response = requests.delete(the_url,
                                   verify = self.verify_server,
                                   headers = {"Authorization": self.token})

        if response.status_code != requests.codes.ok:
            self.log.error("http DELETE %s <--> (%s - %s)\n%s",
                           the_url, response.status_code, response.reason,
                           response.text)
        else:
            self.log.info("http DELETE %s <--> (%s - %s)",
                          the_url, response.status_code, response.text)

        return response

    def http_put(self, endpoint, payload):
        """Execute an http put request.

        :param endpoint: Ansible Runner service RESTful API endpoint
        :param payload: Dictionary with the data used in the put request

        :returns: A requests response object
        """
        # TODO
        raise NotImplementedError("TODO")
diff --git a/src/pybind/mgr/ansible/module.py b/src/pybind/mgr/ansible/module.py
new file mode 100644
index 00000000..81e1c40a
--- /dev/null
+++ b/src/pybind/mgr/ansible/module.py
@@ -0,0 +1,729 @@
+"""
+ceph-mgr Ansible orchestrator module
+
+The external Orchestrator is the Ansible runner service (RESTful https service)
+"""
+
+# pylint: disable=abstract-method, no-member, bad-continuation
+
+import json
+import requests
+
+from mgr_module import MgrModule
+import orchestrator
+
+from .ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode,\
+ AnsibleRunnerServiceError
+
+from .output_wizards import ProcessInventory, ProcessPlaybookResult, \
+ ProcessHostsList
+
+# Time to clean the completions list
+WAIT_PERIOD = 10
+
+# List of playbooks names used
+
+# Name of the playbook used in the "get_inventory" method.
+# This playbook is expected to provide a list of storage devices in the host
+# where the playbook is executed.
+GET_STORAGE_DEVICES_CATALOG_PLAYBOOK = "storage-inventory.yml"
+
+# Used in the create_osd method
+ADD_OSD_PLAYBOOK = "add-osd.yml"
+
+# Used in the remove_osds method
+REMOVE_OSD_PLAYBOOK = "shrink-osd.yml"
+
+# Default name for the inventory group for hosts managed by the Orchestrator
+ORCHESTRATOR_GROUP = "orchestrator"
+
+# URLs for Ansible Runner Operations
+# Add or remove host in one group
+URL_ADD_RM_HOSTS = "api/v1/hosts/{host_name}/groups/{inventory_group}"
+
+# Retrieve the groups where the host is included in.
+URL_GET_HOST_GROUPS = "api/v1/hosts/{host_name}"
+# Manage groups
+URL_MANAGE_GROUP = "api/v1/groups/{group_name}"
+# URLs for Ansible Runner Operations
+URL_GET_HOSTS = "api/v1/hosts"
+
+
class AnsibleReadOperation(orchestrator.ReadCompletion):
    """ A read operation means to obtain information from the cluster.
    """
    def __init__(self, client, logger):
        """
        :param client : Ansible Runner Service Client
        :param logger : The object used to log messages
        """
        super(AnsibleReadOperation, self).__init__()

        # Private attributes
        self._is_complete = False
        self._is_errored = False
        self._result = []
        self._status = ExecutionStatusCode.NOT_LAUNCHED

        # OutputWizard object used to process the operation result.
        # FIX: the original code assigned this attribute twice.
        self.output_wizard = None

        # Error description in operation
        self.error = ""

        # Ansible Runner Service client
        self.ar_client = client

        # Logger
        self.log = logger

    @property
    def is_complete(self):
        """True once the operation has finished, with or without errors."""
        return self._is_complete

    @property
    def is_errored(self):
        """True when the operation finished with errors."""
        return self._is_errored

    @property
    def result(self):
        """Processed result of the operation (empty until complete)."""
        return self._result

    @property
    def status(self):
        """Retrieve the current status of the operation and update state
        attributes. Subclasses must implement this.
        """
        raise NotImplementedError()
+
class ARSOperation(AnsibleReadOperation):
    """Execute an Ansible Runner Service Operation
    """

    def __init__(self, client, logger, url, get_operation=True, payload=None):
        """
        :param client : Ansible Runner Service Client
        :param logger : The object used to log messages
        :param url : The Ansible Runner Service URL that provides
                     the operation
        :param get_operation : True if operation is provided using an http GET
        :param payload : http request payload
        """
        super(ARSOperation, self).__init__(client, logger)

        self.url = url
        self.get_operation = get_operation
        self.payload = payload

    def __str__(self):
        return "Ansible Runner Service: {operation} {url}".format(
            operation="GET" if self.get_operation else "POST",
            url=self.url)

    @property
    def status(self):
        """ Execute the Ansible Runner Service operation and update the status
        and result of the underlying Completion object.
        """

        # Execute the right kind of http request
        if self.get_operation:
            response = self.ar_client.http_get(self.url)
        else:
            # BUG FIX: Client.http_post takes a mandatory query-string dict
            # as third argument; the original call omitted it, raising
            # TypeError whenever a POST operation was executed.
            response = self.ar_client.http_post(self.url, self.payload, {})

        # If no connection errors, the operation is complete
        self._is_complete = True

        # Depending on the response, status and result are updated
        if not response:
            self._is_errored = True
            self._status = ExecutionStatusCode.ERROR
            self._result = "Ansible Runner Service not Available"
        else:
            self._is_errored = (response.status_code != requests.codes.ok)

            if not self._is_errored:
                self._status = ExecutionStatusCode.SUCCESS
                # Let the output wizard (if any) adapt the raw response text
                if self.output_wizard:
                    self._result = self.output_wizard.process(self.url,
                                                              response.text)
                else:
                    self._result = response.text
            else:
                self._status = ExecutionStatusCode.ERROR
                self._result = response.reason

        return self._status
+
+
class PlaybookOperation(AnsibleReadOperation):
    """Execute a playbook using the Ansible Runner Service
    """

    def __init__(self, client, playbook, logger, result_pattern,
                 params,
                 querystr_dict=None):
        """
        :param client : Ansible Runner Service Client
        :param playbook : The playbook to execute
        :param logger : The object used to log messages
        :param result_pattern: The "pattern" to discover what execution events
                               have the information deemed as result
        :param params : http request payload for the playbook execution
        :param querystr_dict : http request querystring for the playbook
                               execution
        """
        super(PlaybookOperation, self).__init__(client, logger)

        # BUG FIX: the original signature used a mutable default argument
        # (querystr_dict={}), which is shared across calls and can leak
        # state between operations; use a None sentinel instead.
        if querystr_dict is None:
            querystr_dict = {}

        # Private attributes
        self.playbook = playbook

        # An additional filter of result events based in the event
        self.event_filter = ""

        # Playbook execution object
        self.pb_execution = PlayBookExecution(client,
                                              playbook,
                                              logger,
                                              result_pattern,
                                              params,
                                              querystr_dict)

    def __str__(self):
        return "Playbook {playbook_name}".format(playbook_name=self.playbook)

    @property
    def status(self):
        """Check the status of the playbook execution and update the status
        and result of the underlying Completion object.
        """

        # Only poll the service while the execution has not finished yet
        if self._status in [ExecutionStatusCode.ON_GOING,
                            ExecutionStatusCode.NOT_LAUNCHED]:
            self._status = self.pb_execution.get_status()

        self._is_complete = (self._status == ExecutionStatusCode.SUCCESS) or \
                            (self._status == ExecutionStatusCode.ERROR)

        self._is_errored = (self._status == ExecutionStatusCode.ERROR)

        if self._is_complete:
            self.update_result()

        return self._status

    def execute_playbook(self):
        """Launch the execution of the playbook with the parameters configured

        :raises AnsibleRunnerServiceError: if the launch fails
        """
        try:
            self.pb_execution.launch()
        except AnsibleRunnerServiceError:
            self._status = ExecutionStatusCode.ERROR
            raise

    def update_result(self):
        """Store the output of the read operation in self._result.

        The result of the playbook execution can be customized through the
        wizard provided in the 'output_wizard' attribute.
        """

        processed_result = []

        if self._is_complete:
            raw_result = self.pb_execution.get_result(self.event_filter)

            if self.output_wizard:
                processed_result = self.output_wizard.process(self.pb_execution.play_uuid,
                                                              raw_result)
            else:
                processed_result = raw_result

        self._result = processed_result
+
+
class AnsibleChangeOperation(orchestrator.WriteCompletion):
    """Operation that changes the "cluster" state.

    Modifications/Changes (writes) are a two-phase thing: first execute
    the playbook that is going to change elements in the Ceph Cluster;
    once the playbook finishes (whatever the outcome), the change
    operation is considered finished.
    """
    def __init__(self):
        super(AnsibleChangeOperation, self).__init__()

        self._status = ExecutionStatusCode.NOT_LAUNCHED
        self._result = None

        # Object used to post-process the operation result
        self.output_wizard = None

    @property
    def status(self):
        """Return the status code of the operation.

        Subclasses must provide the implementation.
        """
        raise NotImplementedError()

    @property
    def is_persistent(self):
        """
        Has the operation updated the orchestrator's configuration
        persistently? Typically this would indicate that an update
        had been written to a manifest, but that the update
        had not necessarily been pushed out to the cluster.

        :return Boolean: True if the execution of the Ansible Playbook or
            the Ansible Runner Service operation has finished
        """
        finished_states = (ExecutionStatusCode.SUCCESS,
                           ExecutionStatusCode.ERROR)
        return self._status in finished_states

    @property
    def is_effective(self):
        """Has the operation taken effect on the cluster?
        For example, if we were adding a service, has it come up and appeared
        in Ceph's cluster maps?

        In the case of Ansible, this is True when the playbook/operation
        executed successfully.

        :return Boolean: True on successful execution
        """
        return self._status == ExecutionStatusCode.SUCCESS

    @property
    def is_errored(self):
        """True when the operation finished with errors."""
        return self._status == ExecutionStatusCode.ERROR

    @property
    def result(self):
        """Raw or wizard-processed result of the operation."""
        return self._result
+
class HttpOperation(object):
    """Small value object that groups everything needed to issue one
    http request against the Ansible Runner Service.
    """

    def __init__(self, url, http_operation, payload="", query_string="{}"):
        # Target endpoint and verb ("get", "post" or "delete")
        self.url = url
        self.http_operation = http_operation
        # Optional request body and query string
        self.payload = payload
        self.query_string = query_string
        # Placeholder for the response once the request is executed
        self.response = None
+
class ARSChangeOperation(AnsibleChangeOperation):
    """Execute one or more Ansible Runner Service Operations that implies
    a change in the cluster
    """
    def __init__(self, client, logger, operations):
        """
        :param client : Ansible Runner Service Client
        :param logger : The object used to log messages
        :param operations : A non-empty list of HttpOperation objects,
            executed in order; the result of the last one becomes the
            result of the completion
        """
        super(ARSChangeOperation, self).__init__()

        assert operations, "At least one operation is needed"
        self.ar_client = client
        self.log = logger
        self.operations = operations

    def __str__(self):
        # Use the last operation as the main one for display purposes
        return "Ansible Runner Service: {operation} {url}".format(
            operation=self.operations[-1].http_operation,
            url=self.operations[-1].url)

    @property
    def status(self):
        """Execute the Ansible Runner Service operations and update the status
        and result of the underlying Completion object.

        Stops at the first operation that fails (http error or client
        exception); only if all succeed is the final result processed.
        """

        for my_request in self.operations:
            # Execute the right kind of http request
            # NOTE(review): if http_operation is none of "post"/"delete"/
            # "get", <response> is left over from the previous iteration
            # (or unbound on the first one) — confirm the verbs are always
            # one of these three.
            try:
                if my_request.http_operation == "post":
                    response = self.ar_client.http_post(my_request.url,
                                                        my_request.payload,
                                                        my_request.query_string)
                elif my_request.http_operation == "delete":
                    response = self.ar_client.http_delete(my_request.url)
                elif my_request.http_operation == "get":
                    response = self.ar_client.http_get(my_request.url)

                # Any problem executing the sequence of operations will
                # produce an errored completion object.
                if response.status_code != requests.codes.ok:
                    self._status = ExecutionStatusCode.ERROR
                    self._result = response.text
                    return self._status

            # Any kind of error communicating with ARS or preventing
            # to have a right http response
            except AnsibleRunnerServiceError as ex:
                self._status = ExecutionStatusCode.ERROR
                self._result = str(ex)
                return self._status

        # If this point is reached, all the operations have been
        # successfully executed, and the final result is updated
        self._status = ExecutionStatusCode.SUCCESS
        if self.output_wizard:
            self._result = self.output_wizard.process("", response.text)
        else:
            self._result = response.text

        return self._status
+
class Module(MgrModule, orchestrator.Orchestrator):
    """An Orchestrator that uses <Ansible Runner Service> to perform operations
    """

    MODULE_OPTIONS = [
        {'name': 'server_url'},
        {'name': 'username'},
        {'name': 'password'},
        {'name': 'verify_server'}  # Check server identity (Boolean/path to CA bundle)
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)

        # True while the module is serving
        self.run = False

        # Completion objects of all the launched operations
        self.all_completions = []

        # Ansible Runner Service client (created in serve())
        self.ar_client = None

    def available(self):
        """ Check if Ansible Runner service is working
        """
        # TODO
        return (True, "Everything ready")

    def wait(self, completions):
        """Given a list of Completion instances, progress any which are
        incomplete.

        :param completions: list of Completion instances
        :Returns : True if everything is done.
        """

        # Check progress and update status in each operation
        # Accessing the completion.status property does the trick
        for operation in completions:
            self.log.info("<%s> status:%s", operation, operation.status)

        # BUG FIX: on Python 3, filter() returns a lazy iterator and
        # len() raises TypeError on it; build a list instead.
        completions = [c for c in completions if not c.is_complete]

        ops_pending = len(completions)
        self.log.info("Operations pending: %s", ops_pending)

        return ops_pending == 0

    def serve(self):
        """ Mandatory for standby modules
        """
        self.log.info("Starting Ansible Orchestrator module ...")

        # Verify config options (Just that settings are available)
        self.verify_config()

        # Ansible runner service client
        try:
            self.ar_client = Client(server_url=self.get_module_option('server_url', ''),
                                    user=self.get_module_option('username', ''),
                                    password=self.get_module_option('password', ''),
                                    verify_server=self.get_module_option('verify_server', True),
                                    logger=self.log)
        except AnsibleRunnerServiceError:
            self.log.exception("Ansible Runner Service not available. "
                               "Check external server status/TLS identity or "
                               "connection options. If configuration options changed"
                               " try to disable/enable the module.")
            self.shutdown()
            return

        self.run = True

    def shutdown(self):
        """Stop serving: flags the module as not running."""
        self.log.info('Stopping Ansible orchestrator module')
        self.run = False

    def get_inventory(self, node_filter=None, refresh=False):
        """Retrieve the storage devices catalog of the cluster hosts.

        :param node_filter: node filter instance (currently unused)
        :param refresh: refresh any cached state (currently unused)
        :Return : A AnsibleReadOperation instance (Completion Object)
        """

        # Create a new read completion object to execute the playbook
        playbook_operation = PlaybookOperation(client=self.ar_client,
                                               playbook=GET_STORAGE_DEVICES_CATALOG_PLAYBOOK,
                                               logger=self.log,
                                               result_pattern="list storage inventory",
                                               params={})

        # Assign the output wizard that adapts events to InventoryNode lists
        playbook_operation.output_wizard = ProcessInventory(self.ar_client,
                                                            self.log)
        playbook_operation.event_filter = "runner_on_ok"

        # Execute the playbook to obtain data
        self._launch_operation(playbook_operation)

        return playbook_operation

    def create_osds(self, drive_group, all_hosts):
        """Create one or more OSDs within a single Drive Group.
        If no host provided the operation affects all the host in the OSDS role


        :param drive_group: (orchestrator.DriveGroupSpec),
                            Drive group with the specification of drives to use
        :param all_hosts  : (List[str]),
                            List of hosts where the OSD's must be created
        """

        # Transform drive group specification to Ansible playbook parameters
        host, osd_spec = dg_2_ansible(drive_group)

        # Create a new read completion object to execute the playbook
        playbook_operation = PlaybookOperation(client=self.ar_client,
                                               playbook=ADD_OSD_PLAYBOOK,
                                               logger=self.log,
                                               result_pattern="",
                                               params=osd_spec,
                                               querystr_dict={"limit": host})

        # Filter to get the result
        playbook_operation.output_wizard = ProcessPlaybookResult(self.ar_client,
                                                                 self.log)
        playbook_operation.event_filter = "playbook_on_stats"

        # Execute the playbook
        self._launch_operation(playbook_operation)

        return playbook_operation

    def remove_osds(self, osd_ids):
        """Remove osd's.

        :param osd_ids: List of osd's to be removed (List[int])
        """

        extravars = {'osd_to_kill': ",".join([str(osd_id) for osd_id in osd_ids]),
                     'ireallymeanit':'yes'}

        # Create a new read completion object to execute the playbook
        playbook_operation = PlaybookOperation(client=self.ar_client,
                                               playbook=REMOVE_OSD_PLAYBOOK,
                                               logger=self.log,
                                               result_pattern="",
                                               params=extravars)

        # Filter to get the result
        playbook_operation.output_wizard = ProcessPlaybookResult(self.ar_client,
                                                                 self.log)
        playbook_operation.event_filter = "playbook_on_stats"

        # Execute the playbook
        self._launch_operation(playbook_operation)

        return playbook_operation

    def get_hosts(self):
        """Provides a list of the Inventory nodes
        """

        host_ls_op = ARSOperation(self.ar_client, self.log, URL_GET_HOSTS)

        host_ls_op.output_wizard = ProcessHostsList(self.ar_client,
                                                    self.log)

        return host_ls_op

    def add_host(self, host):
        """
        Add a host to the Ansible Runner Service inventory in the "orchestrator"
        group

        :param host: hostname
        :returns : orchestrator.WriteCompletion
        """

        url_group = URL_MANAGE_GROUP.format(group_name=ORCHESTRATOR_GROUP)

        try:
            # Create the orchestrator default group if it does not exist.
            # If it exists we ignore the error response
            dummy_response = self.ar_client.http_post(url_group, "", {})

            # Here, the default group exists so...
            # Prepare the operation for adding the new host
            add_url = URL_ADD_RM_HOSTS.format(host_name=host,
                                              inventory_group=ORCHESTRATOR_GROUP)

            operations = [HttpOperation(add_url, "post")]

        except AnsibleRunnerServiceError as ex:
            # Problems with the external orchestrator.
            # Prepare the operation to return the error in a Completion object.
            self.log.exception("Error checking <orchestrator> group: %s", ex)
            operations = [HttpOperation(url_group, "post")]

        return ARSChangeOperation(self.ar_client, self.log, operations)

    def remove_host(self, host):
        """
        Remove a host from all the groups in the Ansible Runner Service
        inventory.

        :param host: hostname
        :returns : orchestrator.WriteCompletion
        """

        operations = []
        host_groups = []

        try:
            # Get the list of groups where the host is included
            groups_url = URL_GET_HOST_GROUPS.format(host_name=host)
            response = self.ar_client.http_get(groups_url)

            if response.status_code == requests.codes.ok:
                host_groups = json.loads(response.text)["data"]["groups"]

        except AnsibleRunnerServiceError:
            self.log.exception("Error retrieving host groups")

        if not host_groups:
            # Error retrieving the groups, prepare the completion object to
            # execute the problematic operation just to provide the error
            # to the caller
            operations = [HttpOperation(groups_url, "get")]
        else:
            # Build the list of delete operations, one per group
            operations = list(map(lambda x:
                                  HttpOperation(URL_ADD_RM_HOSTS.format(
                                                    host_name=host,
                                                    inventory_group=x),
                                                "delete"),
                                  host_groups))

        return ARSChangeOperation(self.ar_client, self.log, operations)

    def _launch_operation(self, ansible_operation):
        """Launch the operation and add the operation to the completion objects
        ongoing

        :ansible_operation: A read/write ansible operation (completion object)
        """

        # Execute the playbook
        ansible_operation.execute_playbook()

        # Add the operation to the list of things ongoing
        self.all_completions.append(ansible_operation)

    def verify_config(self):
        """ Verify configuration options for the Ansible orchestrator module

        :raises Exception: when any mandatory option is missing
        """
        client_msg = ""

        # FIX: missing separating spaces in the concatenated user messages
        if not self.get_module_option('server_url', ''):
            msg = "No Ansible Runner Service base URL <server_name>:<port>. " \
                  "Try 'ceph config set mgr mgr/{0}/server_url " \
                  "<server name/ip>:<port>'".format(self.module_name)
            self.log.error(msg)
            client_msg += msg

        if not self.get_module_option('username', ''):
            msg = "No Ansible Runner Service user. " \
                  "Try 'ceph config set mgr mgr/{0}/username " \
                  "<string value>'".format(self.module_name)
            self.log.error(msg)
            client_msg += msg

        if not self.get_module_option('password', ''):
            msg = "No Ansible Runner Service User password. " \
                  "Try 'ceph config set mgr mgr/{0}/password " \
                  "<string value>'".format(self.module_name)
            self.log.error(msg)
            client_msg += msg

        if not self.get_module_option('verify_server', ''):
            msg = "TLS server identity verification is enabled by default. " \
                  "Use 'ceph config set mgr mgr/{0}/verify_server False' " \
                  "to disable it. Use 'ceph config set mgr mgr/{0}/verify_server " \
                  "<path>' to point the CA bundle path used for " \
                  "verification".format(self.module_name)
            self.log.error(msg)
            client_msg += msg

        if client_msg:
            # Raise error
            # TODO: Use OrchestratorValidationError
            raise Exception(client_msg)
+
+
+
+# Auxiliary functions
+#==============================================================================
def dg_2_ansible(drive_group):
    """Translate a drive group specification into Ansible playbook inputs.

    Returns a tuple (host, osd_spec) where:
      host     -- host name used to limit the playbook execution, or None
                  when the playbook should run everywhere
      osd_spec -- dict of parameters passed to the Ansible playbook that
                  creates the OSDs

    :param drive_group: (type: DriveGroupSpec)

    TODO: this helper will probably be removed or heavily reworked once
    the OSD-creation playbook accepts a drive group through "ceph volume
    batch".
    """

    # Only "*" (all hosts) or a literal host name are honoured for now.
    # host_pattern is meant to be an fnmatch pattern; when that becomes
    # effective, the "get_inventory" output will have to be filtered with
    # it to obtain the host list.
    pattern = drive_group.host_pattern
    host = None if pattern in ["*"] else pattern

    # Compose the OSD configuration: at the moment only the data device
    # goes through "lvm_volumes" (what the current playbook accepts).
    osd = {"data": drive_group.data_devices.paths[0]}
    # Other parameters would be extracted in the same way, e.g.:
    #   osd["dmcrypt"] = drive_group.encryption

    osd_spec = {"lvm_volumes": [osd]}

    # Global scope variables can also be included in osd_spec, e.g.:
    #   osd_spec["osd_objectstore"] = drive_group.objectstore

    return host, osd_spec
diff --git a/src/pybind/mgr/ansible/output_wizards.py b/src/pybind/mgr/ansible/output_wizards.py
new file mode 100644
index 00000000..d924bf04
--- /dev/null
+++ b/src/pybind/mgr/ansible/output_wizards.py
@@ -0,0 +1,158 @@
+"""
+ceph-mgr Output Wizards module
+
+Output wizards are used to process results in different ways in
+completion objects
+"""
+
+# pylint: disable=bad-continuation
+
+import json
+
+
+from orchestrator import InventoryDevice, InventoryNode
+
+from .ansible_runner_svc import EVENT_DATA_URL
+
+class OutputWizard(object):
+    """Base class used to process results in completion objects.
+
+    Subclasses implement ``process`` to adapt the raw Ansible Runner
+    Service output to the format the caller expects.
+    """
+    def __init__(self, ar_client, logger):
+        """Store the attributes every output wizard works with:
+
+        :param ar_client: Ansible Runner Service client
+        :param logger: log object
+        """
+        self.ar_client = ar_client
+        self.log = logger
+
+    def process(self, operation_id, raw_result):
+        """Transform <raw_result> into the desired output (subclass hook).
+
+        :param operation_id: Allows to identify the Ansible Runner Service
+                             operation whose result we want to process
+        :param raw_result: input for processing
+        """
+        raise NotImplementedError
+
+class ProcessInventory(OutputWizard):
+    """ Adapt the output of the playbook used in 'get_inventory'
+    to the Orchestrator expected output (list of InventoryNode)
+    """
+
+    def process(self, operation_id, raw_result):
+        """
+        :param operation_id: Playbook uuid
+        :param raw_result: events dict with the results
+
+        Example:
+        inventory_events =
+         {'37-100564f1-9fed-48c2-bd62-4ae8636dfcdb': {'host': '192.168.121.254',
+                                                      'task': 'list storage inventory',
+                                                      'event': 'runner_on_ok'},
+          '36-2016b900-e38f-7dcd-a2e7-00000000000e': {'host': '192.168.121.252',
+                                                      'task': 'list storage inventory',
+                                                      'event': 'runner_on_ok'}}
+
+        :return : list of InventoryNode
+        """
+        # Just making more readable the method
+        inventory_events = raw_result
+
+        #Obtain the needed data for each result event
+        inventory_nodes = []
+
+        # Loop over the result events and request the event data
+        for event_key, dummy_data in inventory_events.items():
+
+            event_response = self.ar_client.http_get(EVENT_DATA_URL %
+                                                     (operation_id, event_key))
+
+            # self.pb_execution.play_uuid
+
+            # Process the data for each event
+            if event_response:
+                event_data = json.loads(event_response.text)["data"]["event_data"]
+
+                host = event_data["host"]
+
+                # The task "stdout" carries the JSON emitted by
+                # "ceph-volume inventory --format=json" on the host
+                devices = json.loads(event_data["res"]["stdout"])
+                devs = []
+                for storage_device in devices:
+                    dev = InventoryDevice.from_ceph_volume_inventory(storage_device)
+                    devs.append(dev)
+
+                inventory_nodes.append(InventoryNode(host, devs))
+
+
+        return inventory_nodes
+
+class ProcessPlaybookResult(OutputWizard):
+    """ Provides the result of a playbook execution as plain text
+    """
+    def process(self, operation_id, raw_result):
+        """Concatenate the raw text of each execution event.
+
+        :param operation_id: Playbook uuid
+        :param raw_result: events dict with the results
+
+        :return : String with the playbook execution event list
+        """
+        # Just making more readable the method
+        inventory_events = raw_result
+
+        result = ""
+
+        # Loop over the result events and request the data for each one
+        for event_key, dummy_data in inventory_events.items():
+            event_response = self.ar_client.http_get(EVENT_DATA_URL %
+                                                     (operation_id, event_key))
+
+            result += event_response.text
+
+        return result
+
+
+class ProcessHostsList(OutputWizard):
+    """ Format the output of host ls call
+    """
+    def process(self, operation_id, raw_result):
+        """ Format the output of host ls call
+
+        :param operation_id: Not used in this output wizard
+        :param raw_result: In this case is like the following json:
+                {
+                    "status": "OK",
+                    "msg": "",
+                    "data": {
+                        "hosts": [
+                            "host_a",
+                            "host_b",
+                            ...
+                            "host_x",
+                            ]
+                    }
+                }
+
+        :return: list of InventoryNode (empty list on any parsing error)
+        """
+        # Just making more readable the method
+        host_ls_json = raw_result
+
+        inventory_nodes = []
+
+        # Parsing problems are logged and result in an empty list,
+        # never in an exception propagated to the caller
+        try:
+            json_resp = json.loads(host_ls_json)
+
+            for host in json_resp["data"]["hosts"]:
+                inventory_nodes.append(InventoryNode(host, []))
+
+        except ValueError:
+            self.log.exception("Malformed json response")
+        except KeyError:
+            self.log.exception("Unexpected content in Ansible Runner Service"
+                               " response")
+        except TypeError:
+            self.log.exception("Hosts data must be iterable in Ansible Runner "
+                               "Service response")
+
+        return inventory_nodes
diff --git a/src/pybind/mgr/ansible/requirements.txt b/src/pybind/mgr/ansible/requirements.txt
new file mode 100644
index 00000000..e75b578d
--- /dev/null
+++ b/src/pybind/mgr/ansible/requirements.txt
@@ -0,0 +1,2 @@
+tox==2.9.1
+importlib_metadata==2.1.0
diff --git a/src/pybind/mgr/ansible/run-tox.sh b/src/pybind/mgr/ansible/run-tox.sh
new file mode 100644
index 00000000..fd6da244
--- /dev/null
+++ b/src/pybind/mgr/ansible/run-tox.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+function dump_envvars {
+ echo "WITH_PYTHON2: ->$WITH_PYTHON2<-"
+ echo "WITH_PYTHON3: ->$WITH_PYTHON3<-"
+ echo "TOX_PATH: ->$TOX_PATH<-"
+ echo "ENV_LIST: ->$ENV_LIST<-"
+}
+
+# run from ./ or from ../
+: ${MGR_ANSIBLE_VIRTUALENV:=$CEPH_BUILD_DIR/mgr-ansible-virtualenv}
+: ${WITH_PYTHON2:=ON}
+: ${WITH_PYTHON3:=3}
+: ${CEPH_BUILD_DIR:=$PWD/.tox}
+test -d ansible && cd ansible
+
+if [ -e tox.ini ]; then
+ TOX_PATH=$(readlink -f tox.ini)
+else
+ TOX_PATH=$(readlink -f $(dirname $0)/tox.ini)
+fi
+
+# tox.ini will take care of this.
+unset PYTHONPATH
+export CEPH_BUILD_DIR=$CEPH_BUILD_DIR
+
+source ${MGR_ANSIBLE_VIRTUALENV}/bin/activate
+
+if [ "$WITH_PYTHON2" = "ON" ]; then
+ ENV_LIST+="py27,"
+fi
+# WITH_PYTHON3 might be set to "ON" or to the python3 RPM version number
+# prevailing on the system - e.g. "3", "36"
+if [[ "$WITH_PYTHON3" =~ (^3|^ON) ]]; then
+ ENV_LIST+="py3,"
+fi
+# use bash string manipulation to strip off any trailing comma
+ENV_LIST=${ENV_LIST%,}
+
+tox -c "${TOX_PATH}" -e "${ENV_LIST}" "$@"
+TOX_STATUS="$?"
+test "$TOX_STATUS" -ne "0" && dump_envvars
+exit $TOX_STATUS
diff --git a/src/pybind/mgr/ansible/tests/__init__.py b/src/pybind/mgr/ansible/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/pybind/mgr/ansible/tests/__init__.py
diff --git a/src/pybind/mgr/ansible/tests/pb_execution_events.data b/src/pybind/mgr/ansible/tests/pb_execution_events.data
new file mode 100644
index 00000000..248134a3
--- /dev/null
+++ b/src/pybind/mgr/ansible/tests/pb_execution_events.data
@@ -0,0 +1,183 @@
+{
+ "status": "OK",
+ "msg": "",
+ "data": {
+ "events": {
+ "2-6edf768f-2923-44e1-b884-f0227b811cfc": {
+ "event": "playbook_on_start"
+ },
+ "3-2016b900-e38f-7dcd-a2e7-000000000008": {
+ "event": "playbook_on_play_start"
+ },
+ "4-2016b900-e38f-7dcd-a2e7-000000000012": {
+ "event": "playbook_on_task_start",
+ "task": "Gathering Facts"
+ },
+ "5-19ae1e5e-aa2d-479e-845a-ef0253cc1f99": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "Gathering Facts"
+ },
+ "6-aad3acc4-06a3-4c97-82ff-31e9e484b1f5": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "Gathering Facts"
+ },
+ "7-55298017-3e7d-4734-b316-bbe13ce1da5e": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Gathering Facts"
+ },
+ "8-2016b900-e38f-7dcd-a2e7-00000000000a": {
+ "event": "playbook_on_task_start",
+ "task": "setup"
+ },
+ "9-2085ccb6-e337-4b9f-bc38-1d8bbf9b973f": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "setup"
+ },
+ "10-e14cdbbc-4883-436c-a41c-a8194ec69075": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "setup"
+ },
+ "11-6d815a26-df53-4240-b8b6-2484e88e4f48": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "setup"
+ },
+ "12-2016b900-e38f-7dcd-a2e7-00000000000b": {
+ "event": "playbook_on_task_start",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "13-799b0119-ccab-4eca-b30b-a37b0bafa02c": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "14-6beb6958-4bfd-4a9c-bd2c-d20d00248605": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "15-3ca99cc8-98ea-4967-8f2d-115426d00b6a": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "16-2016b900-e38f-7dcd-a2e7-00000000000c": {
+ "event": "playbook_on_task_start",
+ "task": "check if disk {{ item }} is free"
+ },
+ "17-8c88141a-08d1-411f-a855-9f7702a49c4e": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.245",
+ "task": "check if disk vda is free"
+ },
+ "18-4457db98-6f18-4f63-bfaa-584db5eea05b": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.245",
+ "task": "check if disk {{ item }} is free"
+ },
+ "19-ac3c72cd-1fbb-495a-be69-53fa6029f356": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.61",
+ "task": "check if disk vda is free"
+ },
+ "20-d161cb70-ba2e-4571-b029-c6428a566fef": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.61",
+ "task": "check if disk {{ item }} is free"
+ },
+ "21-65f1ce5c-2d86-4cc3-8e10-cff6bf6cbd82": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk sda is free"
+ },
+ "22-7f86dcd4-4ef7-4f5a-9db3-c3780b67cc4b": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk sdb is free"
+ },
+ "23-837bf4f6-a912-46a8-b94b-55aa66a935c4": {
+ "event": "runner_item_on_ok",
+ "host": "192.168.121.254",
+ "task": "check if disk sdc is free"
+ },
+ "24-adf6238d-723f-4783-9226-8475419d466e": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk vda is free"
+ },
+ "25-554661d8-bc34-4885-a589-4960d6b8a487": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk {{ item }} is free"
+ },
+ "26-2016b900-e38f-7dcd-a2e7-00000000000d": {
+ "event": "playbook_on_task_start",
+ "task": "Update hosts freedisk list"
+ },
+ "27-52df484c-30a0-4e3b-9057-02ca345c5790": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "28-083616ad-3c1f-4fb8-a06c-5d64e670e362": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "29-bffc68d3-5448-491f-8780-07858285f5cd": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.245",
+ "task": "Update hosts freedisk list"
+ },
+ "30-cca2dfd9-16e9-4fcb-8bf7-c4da7dab5668": {
+ "event": "runner_on_skipped",
+ "host": "192.168.121.245",
+ "task": "Update hosts freedisk list"
+ },
+ "31-158a98ac-7e8d-4ebb-8c53-4467351a2d3a": {
+ "event": "runner_item_on_ok",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "32-06a7e809-8d82-41df-b01d-45d94e519cb7": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "33-d5cdbb58-728a-4be5-abf1-4a051146e727": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.61",
+ "task": "Update hosts freedisk list"
+ },
+ "34-9b3c570b-22d8-4539-8c94-d0c1cbed8633": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "35-93336830-03cd-43ff-be87-a7e063ca7547": {
+ "event": "runner_on_skipped",
+ "host": "192.168.121.61",
+ "task": "Update hosts freedisk list"
+ },
+ "36-2016b900-e38f-7dcd-a2e7-00000000000e": {
+ "event": "playbook_on_task_start",
+ "task": "RESULTS"
+ },
+ "37-100564f1-9fed-48c2-bd62-4ae8636dfcdb": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "RESULTS"
+ },
+ "38-20a64160-30a1-481f-a3ee-36e491bc7869": {
+ "event": "playbook_on_stats"
+ }
+ },
+ "total_events": 37
+ }
+}
+
diff --git a/src/pybind/mgr/ansible/tests/test_client_playbooks.py b/src/pybind/mgr/ansible/tests/test_client_playbooks.py
new file mode 100644
index 00000000..98dfd3dd
--- /dev/null
+++ b/src/pybind/mgr/ansible/tests/test_client_playbooks.py
@@ -0,0 +1,287 @@
+import logging
+import unittest
+import mock
+import json
+
+import requests_mock
+
+from requests.exceptions import ConnectionError
+
+from ..ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode, \
+ LOGIN_URL, API_URL, PLAYBOOK_EXEC_URL, \
+ PLAYBOOK_EVENTS, AnsibleRunnerServiceError
+
+
+SERVER_URL = "ars:5001"
+USER = "admin"
+PASSWORD = "admin"
+CERTIFICATE = ""
+
+# Playbook attributes
+PB_NAME = "test_playbook"
+PB_UUID = "1733c3ac"
+
+# Playbook execution data file
+PB_EVENTS_FILE = "./tests/pb_execution_events.data"
+
+# create console handler and set level to info
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+handler.setLevel(logging.INFO)
+formatter = logging.Formatter("%(levelname)s - %(message)s")
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+def mock_login(mock_server):
+    """Register the login and api endpoints in <mock_server> so that a
+    Client can be instantiated against the mocked service.
+
+    :param mock_server: requests_mock mocker object
+    """
+
+    the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL)
+
+    mock_server.register_uri("GET",
+                             the_login_url,
+                             json={"status": "OK",
+                                   "msg": "Token returned",
+                                   "data": {"token": "dummy_token"}},
+                             status_code=200)
+
+    the_api_url = "https://%s/%s" % (SERVER_URL,API_URL)
+    mock_server.register_uri("GET",
+                             the_api_url,
+                             text="<!DOCTYPE html>api</html>",
+                             status_code=200)
+
+def mock_get_pb(mock_server, playbook_name, return_code):
+    """Build a PlayBookExecution object backed by a mocked service.
+
+    Registers the playbook execution endpoint in <mock_server>, answering
+    with <return_code>: 404 -> playbook not found, 202 -> execution started.
+
+    :param mock_server: requests_mock mocker object
+    :param playbook_name: name of the playbook to "execute"
+    :param return_code: HTTP status code the mocked POST will return
+    :return: PlayBookExecution object using the mocked client
+    """
+
+    mock_login(mock_server)
+
+    ars_client = Client(SERVER_URL, USER, PASSWORD,
+                        CERTIFICATE, logger)
+
+    the_pb_url = "https://%s/%s/%s" % (SERVER_URL, PLAYBOOK_EXEC_URL, playbook_name)
+
+    if return_code == 404:
+        mock_server.register_uri("POST",
+                                 the_pb_url,
+                                 json={ "status": "NOTFOUND",
+                                        "msg": "playbook file not found",
+                                        "data": {}},
+                                 status_code=return_code)
+    elif return_code == 202:
+        mock_server.register_uri("POST",
+                                 the_pb_url,
+                                 json={ "status": "STARTED",
+                                        "msg": "starting",
+                                        "data": { "play_uuid": "1733c3ac" }},
+                                 status_code=return_code)
+
+    return PlayBookExecution(ars_client, playbook_name, logger,
+                             result_pattern = "RESULTS")
+
+class ARSclientTest(unittest.TestCase):
+    """Tests for the Ansible Runner Service Client class"""
+
+    def test_server_not_reachable(self):
+        """No mocked endpoints: the connection error must surface as an
+        AnsibleRunnerServiceError raised by the Client constructor.
+        """
+        with self.assertRaises(AnsibleRunnerServiceError):
+            ars_client = Client(SERVER_URL, USER, PASSWORD,
+                                CERTIFICATE, logger)
+
+    def test_server_wrong_USER(self):
+        """A 401 from the login endpoint leaves the client non operative"""
+
+        with requests_mock.Mocker() as mock_server:
+            the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL)
+            mock_server.get(the_login_url,
+                            json={"status": "NOAUTH",
+                                  "msg": "Access denied invalid login: unknown USER",
+                                  "data": {}},
+                            status_code=401)
+
+
+            ars_client = Client(SERVER_URL, USER, PASSWORD,
+                                CERTIFICATE, logger)
+
+            self.assertFalse(ars_client.is_operative(),
+                             "Operative attribute expected to be False")
+
+    def test_server_connection_ok(self):
+        """A successful login leaves the client operative"""
+
+        with requests_mock.Mocker() as mock_server:
+
+            mock_login(mock_server)
+
+            ars_client = Client(SERVER_URL, USER, PASSWORD,
+                                CERTIFICATE, logger)
+
+            self.assertTrue(ars_client.is_operative(),
+                            "Operative attribute expected to be True")
+
+    def test_server_http_delete(self):
+        """http_delete returns the raw response of the DELETE request"""
+
+        with requests_mock.Mocker() as mock_server:
+
+            mock_login(mock_server)
+
+            ars_client = Client(SERVER_URL, USER, PASSWORD,
+                                CERTIFICATE, logger)
+
+            url = "https://%s/test" % (SERVER_URL)
+            mock_server.register_uri("DELETE",
+                                     url,
+                                     json={ "status": "OK",
+                                            "msg": "",
+                                            "data": {}},
+                                     status_code=201)
+
+            response = ars_client.http_delete("test")
+            self.assertTrue(response.status_code == 201)
+
+class PlayBookExecutionTests(unittest.TestCase):
+    """Tests for the PlayBookExecution class (launch/status/result)"""
+
+
+    def test_playbook_execution_ok(self):
+        """Check playbook id is set when the playbook is launched
+        """
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            test_pb.launch()
+
+            self.assertEqual(test_pb.play_uuid, PB_UUID,
+                             "Found Unexpected playbook uuid")
+
+
+
+    def test_playbook_execution_error(self):
+        """Check playbook id is not set when the playbook is not present
+        """
+
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, "unknown_playbook", 404)
+
+            with self.assertRaises(AnsibleRunnerServiceError):
+                test_pb.launch()
+
+            #self.assertEqual(test_pb.play_uuid, "",
+            #                 "Playbook uuid not empty")
+
+    def test_playbook_not_launched(self):
+        """Check right status code when Playbook execution has not been launched
+        """
+
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            # Check playbook not launched
+            self.assertEqual(test_pb.get_status(),
+                             ExecutionStatusCode.NOT_LAUNCHED,
+                             "Wrong status code for playbook not launched")
+
+    def test_playbook_launched(self):
+        """Check right status code when Playbook execution has been launched
+        """
+
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            test_pb.launch()
+
+            # The status endpoint reports the execution still "running"
+            the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+                                                   PLAYBOOK_EXEC_URL,
+                                                   PB_UUID)
+            mock_server.register_uri("GET",
+                                     the_status_url,
+                                     json={"status": "OK",
+                                           "msg": "running",
+                                           "data": {"task": "Step 2",
+                                                    "last_task_num": 6}
+                                          },
+                                     status_code=200)
+
+            self.assertEqual(test_pb.get_status(),
+                             ExecutionStatusCode.ON_GOING,
+                             "Wrong status code for a running playbook")
+
+            self.assertEqual(test_pb.play_uuid, PB_UUID,
+                             "Unexpected playbook uuid")
+
+    def test_playbook_finish_ok(self):
+        """Check right status code when Playbook execution is successful
+        """
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            test_pb.launch()
+
+            # The status endpoint reports "successful"
+            the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+                                                   PLAYBOOK_EXEC_URL,
+                                                   PB_UUID)
+            mock_server.register_uri("GET",
+                                     the_status_url,
+                                     json={"status": "OK",
+                                           "msg": "successful",
+                                           "data": {}
+                                          },
+                                     status_code=200)
+
+            self.assertEqual(test_pb.get_status(),
+                             ExecutionStatusCode.SUCCESS,
+                             "Wrong status code for a playbook executed succesfully")
+
+    def test_playbook_finish_error(self):
+        """Check right status code when Playbook execution has failed
+        """
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            test_pb.launch()
+
+            # The status endpoint reports "failed"
+            the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+                                                   PLAYBOOK_EXEC_URL,
+                                                   PB_UUID)
+            mock_server.register_uri("GET",
+                                     the_status_url,
+                                     json={"status": "OK",
+                                           "msg": "failed",
+                                           "data": {}
+                                          },
+                                     status_code=200)
+
+            self.assertEqual(test_pb.get_status(),
+                             ExecutionStatusCode.ERROR,
+                             "Wrong status code for a playbook with error")
+
+    def test_playbook_get_result(self):
+        """ Find the right result event in a set of different events
+        """
+        with requests_mock.Mocker() as mock_server:
+
+            test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+            test_pb.launch()
+
+            the_events_url = "https://%s/%s" % (SERVER_URL,
+                                                PLAYBOOK_EVENTS % PB_UUID)
+
+            # Get the events stored in a file
+            pb_events = {}
+            with open(PB_EVENTS_FILE) as events_file:
+                pb_events = json.loads(events_file.read())
+
+            mock_server.register_uri("GET",
+                                     the_events_url,
+                                     json=pb_events,
+                                     status_code=200)
+
+            # Only one event in the fixture matches "runner_on_ok" for
+            # the "RESULTS" task (the configured result_pattern)
+            result = test_pb.get_result("runner_on_ok")
+
+            self.assertEqual(len(result.keys()), 1,
+                             "Unique result event not found")
+
+            self.assertIn("37-100564f1-9fed-48c2-bd62-4ae8636dfcdb",
+                          result.keys(),
+                          "Predefined result event not found")
diff --git a/src/pybind/mgr/ansible/tests/test_output_wizards.py b/src/pybind/mgr/ansible/tests/test_output_wizards.py
new file mode 100644
index 00000000..2a3a9017
--- /dev/null
+++ b/src/pybind/mgr/ansible/tests/test_output_wizards.py
@@ -0,0 +1,207 @@
+""" Test output wizards
+"""
+import unittest
+import mock
+
+from ..ansible_runner_svc import EVENT_DATA_URL
+from ..output_wizards import ProcessHostsList, ProcessPlaybookResult, \
+ ProcessInventory
+
+class OutputWizardProcessHostsList(unittest.TestCase):
+    """Test ProcessHostsList Output Wizard
+    """
+    # Well formed "host ls" response from the Ansible Runner Service
+    RESULT_OK = """
+    {
+        "status": "OK",
+        "msg": "",
+        "data": {
+            "hosts": [
+                "host_a",
+                "host_b",
+                "host_c"
+            ]
+        }
+    }
+    """
+    # The wizard only logs through the mocked logger; no real service needed
+    ar_client = mock.Mock()
+    logger = mock.Mock()
+    test_wizard = ProcessHostsList(ar_client, logger)
+
+    def test_process(self):
+        """Test a normal call"""
+
+        nodes_list = self.test_wizard.process("", self.RESULT_OK)
+        self.assertEqual([node.name for node in nodes_list],
+                         ["host_a", "host_b", "host_c"])
+
+    def test_errors(self):
+        """Test different kind of errors processing result
+
+        Each malformed input must be swallowed by the wizard and produce
+        an empty host list instead of an exception.
+        """
+
+        # Malformed json
+        host_list = self.test_wizard.process("", """{"msg": """"")
+        self.assertEqual(host_list, [])
+
+        # key error
+        host_list = self.test_wizard.process("", """{"msg": ""}""")
+        self.assertEqual(host_list, [])
+
+        # Hosts not in iterable
+        host_list = self.test_wizard.process("", """{"data":{"hosts": 123} }""")
+        self.assertEqual(host_list, [])
+
+class OutputWizardProcessPlaybookResult(unittest.TestCase):
+    """Test ProcessPlaybookResult Output Wizard
+    """
+    # Input to process
+    INVENTORY_EVENTS = {1:"first event", 2:"second event"}
+    EVENT_INFORMATION = "event information\n"
+
+    # Mocked response: every event data request returns the same text
+    mocked_response = mock.Mock()
+    mocked_response.text = EVENT_INFORMATION
+
+    # The Ansible Runner Service client
+    ar_client = mock.Mock()
+    ar_client.http_get = mock.MagicMock(return_value=mocked_response)
+
+    logger = mock.Mock()
+
+    test_wizard = ProcessPlaybookResult(ar_client, logger)
+
+    def test_process(self):
+        """Test a normal call
+
+        One http GET per event is expected; the result is the
+        concatenation of each response text.
+        """
+
+        operation_id = 24
+        result = self.test_wizard.process(operation_id, self.INVENTORY_EVENTS)
+
+        # Check http request are correct and compose expected result
+        for key, dummy_data in self.INVENTORY_EVENTS.items():
+            http_request = EVENT_DATA_URL % (operation_id, key)
+            self.ar_client.http_get.assert_any_call(http_request)
+            expected_result += self.EVENT_INFORMATION
+
+        #Check result
+        self.assertEqual(result, expected_result)
+
+class OutputWizardProcessInventory(unittest.TestCase):
+ """Test ProcessInventory Output Wizard
+ """
+ # Input to process
+ INVENTORY_EVENTS = {'event_uuid_1': {'host': '192.168.121.144',
+ 'task': 'list storage inventory',
+ 'event': 'runner_on_ok'}}
+ EVENT_DATA = r"""
+ {
+ "status": "OK",
+ "msg": "",
+ "data": {
+ "uuid": "5e96d509-174d-4f5f-bd94-e278c3a5b85b",
+ "counter": 11,
+ "stdout": "changed: [192.168.121.144]",
+ "start_line": 17,
+ "end_line": 18,
+ "runner_ident": "6e98b2ba-3ce1-11e9-be81-2016b900e38f",
+ "created": "2019-03-02T11:50:56.582112",
+ "pid": 482,
+ "event_data": {
+ "play_pattern": "osds",
+ "play": "query each host for storage device inventory",
+ "task": "list storage inventory",
+ "task_args": "_ansible_version=2.6.5, _ansible_selinux_special_fs=['fuse', 'nfs', 'vboxsf', 'ramfs', '9p'], _ansible_no_log=False, _ansible_module_name=ceph_volume, _ansible_debug=False, _ansible_verbosity=0, _ansible_keep_remote_files=False, _ansible_syslog_facility=LOG_USER, _ansible_socket=None, action=inventory, _ansible_diff=False, _ansible_remote_tmp=~/.ansible/tmp, _ansible_shell_executable=/bin/sh, _ansible_check_mode=False, _ansible_tmpdir=None",
+ "remote_addr": "192.168.121.144",
+ "res": {
+ "_ansible_parsed": true,
+ "stderr_lines": [],
+ "changed": true,
+ "end": "2019-03-02 11:50:56.554937",
+ "_ansible_no_log": false,
+ "stdout": "[{\"available\": true, \"rejected_reasons\": [], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 0, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sdc\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [], \"path\": \"/dev/sdc\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sda\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [{\"cluster_name\": \"ceph\", \"name\": \"osd-data-dcf8a88c-5546-42d2-afa4-b36f7fb23b66\", \"osd_id\": \"3\", \"cluster_fsid\": \"30d61f3e-7ee4-4bdc-8fe7-2ad5bb3f5317\", \"type\": \"block\", \"block_uuid\": \"fVqujC-9dgh-cN9W-1XD4-zVx1-1UdA-fUS3ha\", \"osd_fsid\": \"8b7cbeba-5e86-44ff-a5f3-2e7df77753fe\"}], \"path\": \"/dev/sda\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sdb\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [{\"cluster_name\": \"ceph\", \"name\": \"osd-data-8c92e986-bd97-4b3d-ba77-2cb88e15d80f\", 
\"osd_id\": \"1\", \"cluster_fsid\": \"30d61f3e-7ee4-4bdc-8fe7-2ad5bb3f5317\", \"type\": \"block\", \"block_uuid\": \"mgzO7O-vUfu-H3mf-4R3K-2f97-ZMRH-SngBFP\", \"osd_fsid\": \"6d067688-3e1b-45f9-ad03-8abd19e9f117\"}], \"path\": \"/dev/sdb\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"mq-deadline\", \"rotational\": \"1\", \"vendor\": \"0x1af4\", \"human_readable_size\": \"41.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {\"vda1\": {\"start\": \"2048\", \"holders\": [], \"sectorsize\": 512, \"sectors\": \"2048\", \"size\": \"1024.00 KB\"}, \"vda3\": {\"start\": \"2101248\", \"holders\": [\"dm-0\", \"dm-1\"], \"sectorsize\": 512, \"sectors\": \"81784832\", \"size\": \"39.00 GB\"}, \"vda2\": {\"start\": \"4096\", \"holders\": [], \"sectorsize\": 512, \"sectors\": \"2097152\", \"size\": \"1024.00 MB\"}}, \"rev\": \"\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/vda\", \"support_discard\": \"\", \"model\": \"\", \"ro\": \"0\", \"nr_requests\": \"256\", \"size\": 44023414784.0}, \"lvs\": [{\"comment\": \"not used by ceph\", \"name\": \"LogVol00\"}, {\"comment\": \"not used by ceph\", \"name\": \"LogVol01\"}], \"path\": \"/dev/vda\"}]",
+ "cmd": [
+ "ceph-volume",
+ "inventory",
+ "--format=json"
+ ],
+ "rc": 0,
+ "start": "2019-03-02 11:50:55.150121",
+ "stderr": "",
+ "delta": "0:00:01.404816",
+ "invocation": {
+ "module_args": {
+ "wal_vg": null,
+ "wal": null,
+ "dmcrypt": false,
+ "block_db_size": "-1",
+ "journal": null,
+ "objectstore": "bluestore",
+ "db": null,
+ "batch_devices": [],
+ "db_vg": null,
+ "journal_vg": null,
+ "cluster": "ceph",
+ "osds_per_device": 1,
+ "containerized": "False",
+ "crush_device_class": null,
+ "report": false,
+ "data_vg": null,
+ "data": null,
+ "action": "inventory",
+ "journal_size": "5120"
+ }
+ },
+ "stdout_lines": [
+ "[{\"available\": true, \"rejected_reasons\": [], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 0, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sdc\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [], \"path\": \"/dev/sdc\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sda\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [{\"cluster_name\": \"ceph\", \"name\": \"osd-data-dcf8a88c-5546-42d2-afa4-b36f7fb23b66\", \"osd_id\": \"3\", \"cluster_fsid\": \"30d61f3e-7ee4-4bdc-8fe7-2ad5bb3f5317\", \"type\": \"block\", \"block_uuid\": \"fVqujC-9dgh-cN9W-1XD4-zVx1-1UdA-fUS3ha\", \"osd_fsid\": \"8b7cbeba-5e86-44ff-a5f3-2e7df77753fe\"}], \"path\": \"/dev/sda\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"noop\", \"rotational\": \"1\", \"vendor\": \"ATA\", \"human_readable_size\": \"50.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {}, \"rev\": \"2.5+\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/sdb\", \"support_discard\": \"\", \"model\": \"QEMU HARDDISK\", \"ro\": \"0\", \"nr_requests\": \"128\", \"size\": 53687091200.0}, \"lvs\": [{\"cluster_name\": \"ceph\", \"name\": \"osd-data-8c92e986-bd97-4b3d-ba77-2cb88e15d80f\", \"osd_id\": 
\"1\", \"cluster_fsid\": \"30d61f3e-7ee4-4bdc-8fe7-2ad5bb3f5317\", \"type\": \"block\", \"block_uuid\": \"mgzO7O-vUfu-H3mf-4R3K-2f97-ZMRH-SngBFP\", \"osd_fsid\": \"6d067688-3e1b-45f9-ad03-8abd19e9f117\"}], \"path\": \"/dev/sdb\"}, {\"available\": false, \"rejected_reasons\": [\"locked\"], \"sys_api\": {\"scheduler_mode\": \"mq-deadline\", \"rotational\": \"1\", \"vendor\": \"0x1af4\", \"human_readable_size\": \"41.00 GB\", \"sectors\": 0, \"sas_device_handle\": \"\", \"partitions\": {\"vda1\": {\"start\": \"2048\", \"holders\": [], \"sectorsize\": 512, \"sectors\": \"2048\", \"size\": \"1024.00 KB\"}, \"vda3\": {\"start\": \"2101248\", \"holders\": [\"dm-0\", \"dm-1\"], \"sectorsize\": 512, \"sectors\": \"81784832\", \"size\": \"39.00 GB\"}, \"vda2\": {\"start\": \"4096\", \"holders\": [], \"sectorsize\": 512, \"sectors\": \"2097152\", \"size\": \"1024.00 MB\"}}, \"rev\": \"\", \"sas_address\": \"\", \"locked\": 1, \"sectorsize\": \"512\", \"removable\": \"0\", \"path\": \"/dev/vda\", \"support_discard\": \"\", \"model\": \"\", \"ro\": \"0\", \"nr_requests\": \"256\", \"size\": 44023414784.0}, \"lvs\": [{\"comment\": \"not used by ceph\", \"name\": \"LogVol00\"}, {\"comment\": \"not used by ceph\", \"name\": \"LogVol01\"}], \"path\": \"/dev/vda\"}]"
+ ]
+ },
+ "pid": 482,
+ "play_uuid": "2016b900-e38f-0e09-19be-00000000000c",
+ "task_uuid": "2016b900-e38f-0e09-19be-000000000012",
+ "event_loop": null,
+ "playbook_uuid": "e80e66f2-4a78-4a96-aaf6-fbe473f11312",
+ "playbook": "storage-inventory.yml",
+ "task_action": "ceph_volume",
+ "host": "192.168.121.144",
+ "task_path": "/usr/share/ansible-runner-service/project/storage-inventory.yml:29"
+ },
+ "event": "runner_on_ok"
+ }
+ }
+ """
+
+ # Mocked response
+ mocked_response = mock.Mock()
+ mocked_response.text = EVENT_DATA
+
+ # The Ansible Runner Service client
+ ar_client = mock.Mock()
+ ar_client.http_get = mock.MagicMock(return_value=mocked_response)
+
+ logger = mock.Mock()
+
+ test_wizard = ProcessInventory(ar_client, logger)
+
+ def test_process(self):
+ """Test a normal call
+ """
+ operation_id = 12
+ nodes_list = self.test_wizard.process(operation_id, self.INVENTORY_EVENTS)
+
+ for key, dummy_data in self.INVENTORY_EVENTS.items():
+ http_request = EVENT_DATA_URL % (operation_id, key)
+ self.ar_client.http_get.assert_any_call(http_request)
+
+
+ # Only one host
+ self.assertTrue(len(nodes_list), 1)
+
+ # Host retrieved OK
+ self.assertEqual(nodes_list[0].name, "192.168.121.144")
+
+ # Devices
+ self.assertTrue(len(nodes_list[0].devices), 4)
+
+ expected_device_ids = ["/dev/sdc", "/dev/sda", "/dev/sdb", "/dev/vda"]
+ device_ids = [dev.id for dev in nodes_list[0].devices]
+
+ self.assertEqual(expected_device_ids, device_ids)
diff --git a/src/pybind/mgr/ansible/tox.ini b/src/pybind/mgr/ansible/tox.ini
new file mode 100644
index 00000000..ae9888a6
--- /dev/null
+++ b/src/pybind/mgr/ansible/tox.ini
@@ -0,0 +1,18 @@
+# Tox configuration for the ansible mgr module unit tests.
+# CEPH_BUILD_DIR and CEPH_LIB are expected in the environment
+# (see run-tox.sh and CMakeLists.txt).
+[tox]
+envlist = py27,py3
+skipsdist = true
+toxworkdir = {env:CEPH_BUILD_DIR}/ansible
+minversion = 2.8.1
+
+[testenv]
+deps =
+  pytest
+  mock
+  requests-mock
+setenv=
+    UNITTEST = true
+    py27: PYTHONPATH = {env:CEPH_LIB}/cython_modules/lib.2
+    py3: PYTHONPATH = {env:CEPH_LIB}/cython_modules/lib.3
+
+commands=
+    {envbindir}/py.test tests/