Diffstat (limited to 'src/pybind/mgr/dashboard/controllers')
-rw-r--r--  src/pybind/mgr/dashboard/controllers/__init__.py | 957
-rw-r--r--  src/pybind/mgr/dashboard/controllers/auth.py | 96
-rw-r--r--  src/pybind/mgr/dashboard/controllers/cephfs.py | 294
-rw-r--r--  src/pybind/mgr/dashboard/controllers/cluster_configuration.py | 106
-rw-r--r--  src/pybind/mgr/dashboard/controllers/docs.py | 427
-rw-r--r--  src/pybind/mgr/dashboard/controllers/erasure_code_profile.py | 66
-rw-r--r--  src/pybind/mgr/dashboard/controllers/grafana.py | 50
-rw-r--r--  src/pybind/mgr/dashboard/controllers/health.py | 189
-rw-r--r--  src/pybind/mgr/dashboard/controllers/home.py | 133
-rw-r--r--  src/pybind/mgr/dashboard/controllers/host.py | 12
-rw-r--r--  src/pybind/mgr/dashboard/controllers/iscsi.py | 1049
-rw-r--r--  src/pybind/mgr/dashboard/controllers/logging.py | 10
-rw-r--r--  src/pybind/mgr/dashboard/controllers/logs.py | 51
-rw-r--r--  src/pybind/mgr/dashboard/controllers/mgr_modules.py | 172
-rw-r--r--  src/pybind/mgr/dashboard/controllers/monitor.py | 40
-rw-r--r--  src/pybind/mgr/dashboard/controllers/nfsganesha.py | 315
-rw-r--r--  src/pybind/mgr/dashboard/controllers/osd.py | 242
-rw-r--r--  src/pybind/mgr/dashboard/controllers/perf_counters.py | 85
-rw-r--r--  src/pybind/mgr/dashboard/controllers/pool.py | 234
-rw-r--r--  src/pybind/mgr/dashboard/controllers/prometheus.py | 83
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rbd.py | 526
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rbd_mirroring.py | 460
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rgw.py | 385
-rw-r--r--  src/pybind/mgr/dashboard/controllers/role.py | 110
-rw-r--r--  src/pybind/mgr/dashboard/controllers/saml2.py | 113
-rw-r--r--  src/pybind/mgr/dashboard/controllers/settings.py | 68
-rw-r--r--  src/pybind/mgr/dashboard/controllers/summary.py | 87
-rw-r--r--  src/pybind/mgr/dashboard/controllers/task.py | 15
-rw-r--r--  src/pybind/mgr/dashboard/controllers/user.py | 91
29 files changed, 6466 insertions, 0 deletions
diff --git a/src/pybind/mgr/dashboard/controllers/__init__.py b/src/pybind/mgr/dashboard/controllers/__init__.py
new file mode 100644
index 00000000..17e293e7
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/__init__.py
@@ -0,0 +1,957 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=protected-access,too-many-branches
+from __future__ import absolute_import
+
+import collections
+import importlib
+import inspect
+import json
+import os
+import pkgutil
+import re
+import sys
+
+if sys.version_info >= (3, 0):
+ from urllib.parse import unquote # pylint: disable=no-name-in-module,import-error
+else:
+ from urllib import unquote # pylint: disable=no-name-in-module
+
+# pylint: disable=wrong-import-position
+import cherrypy
+
+from .. import logger
+from ..security import Scope, Permission
+from ..tools import wraps, getargspec, TaskManager, get_request_body_params
+from ..exceptions import ScopeNotValid, PermissionNotValid
+from ..services.auth import AuthManager, JwtManager
+from ..plugins import PLUGIN_MANAGER
+
+
+def EndpointDoc(description="", group="", parameters=None, responses=None):
+ if not isinstance(description, str):
+ raise Exception("%s has been called with a description that is not a string: %s"
+ % (EndpointDoc.__name__, description))
+ elif not isinstance(group, str):
+        raise Exception("%s has been called with a group name that is not a string: %s"
+ % (EndpointDoc.__name__, group))
+ elif parameters and not isinstance(parameters, dict):
+ raise Exception("%s has been called with parameters that is not a dict: %s"
+ % (EndpointDoc.__name__, parameters))
+ elif responses and not isinstance(responses, dict):
+ raise Exception("%s has been called with responses that is not a dict: %s"
+ % (EndpointDoc.__name__, responses))
+
+ if not parameters:
+ parameters = {}
+
+ def _split_param(name, p_type, description, optional=False, default_value=None, nested=False):
+ param = {
+ 'name': name,
+ 'description': description,
+ 'required': not optional,
+ 'nested': nested,
+ }
+        if default_value is not None:
+ param['default'] = default_value
+ if isinstance(p_type, type):
+ param['type'] = p_type
+ else:
+ nested_params = _split_parameters(p_type, nested=True)
+ if nested_params:
+ param['type'] = type(p_type)
+ param['nested_params'] = nested_params
+ else:
+ param['type'] = p_type
+ return param
+
+    # 'optional' must be set to True in order to set a default value. The
+    # expected parameter format is:
+    # 'name: (type or nested parameters, description, [optional], [default value])'
+ def _split_dict(data, nested):
+ splitted = []
+ for name, props in data.items():
+ if isinstance(name, str) and isinstance(props, tuple):
+ if len(props) == 2:
+ param = _split_param(name, props[0], props[1], nested=nested)
+ elif len(props) == 3:
+ param = _split_param(name, props[0], props[1], optional=props[2], nested=nested)
+                elif len(props) == 4:
+ param = _split_param(name, props[0], props[1], props[2], props[3], nested)
+ splitted.append(param)
+ else:
+ raise Exception(
+                        """Parameter %s in %s does not have the correct format. Valid formats are:
+ <name>: (<type>, <description>, [optional], [default value])
+ <name>: (<[type]>, <description>, [optional], [default value])
+ <name>: (<[nested parameters]>, <description>, [optional], [default value])
+ <name>: (<{nested parameters}>, <description>, [optional], [default value])"""
+ % (name, EndpointDoc.__name__))
+ return splitted
+
+ def _split_list(data, nested):
+ splitted = []
+ for item in data:
+ splitted.extend(_split_parameters(item, nested))
+ return splitted
+
+ # nested = True means parameters are inside a dict or array
+ def _split_parameters(data, nested=False):
+ param_list = []
+ if isinstance(data, dict):
+ param_list.extend(_split_dict(data, nested))
+ elif isinstance(data, (list, tuple)):
+ param_list.extend(_split_list(data, True))
+ return param_list
+
+ resp = {}
+ if responses:
+ for status_code, response_body in responses.items():
+ resp[str(status_code)] = _split_parameters(response_body)
+
+ def _wrapper(func):
+ func.doc_info = {
+ 'summary': description,
+ 'tag': group,
+ 'parameters': _split_parameters(parameters),
+ 'response': resp
+ }
+ return func
+
+ return _wrapper
+
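+# Usage sketch for @EndpointDoc (illustrative only; the endpoint and fields
+# below are made up and are not part of this module):
+#
+#     @EndpointDoc(description="Display pool details",
+#                  group="Pool",
+#                  parameters={'pool_name': (str, 'Name of the pool')},
+#                  responses={200: {'pool_name': (str, 'Name of the pool'),
+#                                   'size': (int, 'Number of replicas', True, 3)}})
+#     def get(self, pool_name):
+#         ...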
+
+class ControllerDoc(object):
+ def __init__(self, description="", group=""):
+ self.tag = group
+ self.tag_descr = description
+
+ def __call__(self, cls):
+ cls.doc_info = {
+ 'tag': self.tag,
+ 'tag_descr': self.tag_descr
+ }
+ return cls
+
+
+class Controller(object):
+ def __init__(self, path, base_url=None, security_scope=None, secure=True):
+ if security_scope and not Scope.valid_scope(security_scope):
+ logger.debug("Invalid security scope name: %s\n Possible values: "
+ "%s", security_scope, Scope.all_scopes())
+ raise ScopeNotValid(security_scope)
+ self.path = path
+ self.base_url = base_url
+ self.security_scope = security_scope
+ self.secure = secure
+
+ if self.path and self.path[0] != "/":
+ self.path = "/" + self.path
+
+ if self.base_url is None:
+ self.base_url = ""
+ elif self.base_url == "/":
+ self.base_url = ""
+
+ if self.base_url == "" and self.path == "":
+ self.base_url = "/"
+
+ def __call__(self, cls):
+ cls._cp_controller_ = True
+ cls._cp_path_ = "{}{}".format(self.base_url, self.path)
+ cls._security_scope = self.security_scope
+
+ config = {
+ 'tools.dashboard_exception_handler.on': True,
+ 'tools.authenticate.on': self.secure,
+ }
+ if not hasattr(cls, '_cp_config'):
+ cls._cp_config = {}
+ cls._cp_config.update(config)
+ return cls
+
+
+class ApiController(Controller):
+ def __init__(self, path, security_scope=None, secure=True):
+ super(ApiController, self).__init__(path, base_url="/api",
+ security_scope=security_scope,
+ secure=secure)
+
+ def __call__(self, cls):
+ cls = super(ApiController, self).__call__(cls)
+ cls._api_endpoint = True
+ return cls
+
+
+class UiApiController(Controller):
+ def __init__(self, path, security_scope=None, secure=True):
+ super(UiApiController, self).__init__(path, base_url="/ui-api",
+ security_scope=security_scope,
+ secure=secure)
+
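+# Usage sketch for the controller decorators (illustrative; the controller
+# below is made up). @ApiController mounts the class under /api/<path>,
+# binds it to a security scope and, unless secure=False, requires
+# authentication:
+#
+#     @ApiController('/ping', Scope.POOL)
+#     class Ping(RESTController):
+#         def list(self):
+#             return ['pong']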
+
+def Endpoint(method=None, path=None, path_params=None, query_params=None,
+ json_response=True, proxy=False, xml=False):
+
+ if method is None:
+ method = 'GET'
+ elif not isinstance(method, str) or \
+ method.upper() not in ['GET', 'POST', 'DELETE', 'PUT']:
+ raise TypeError("Possible values for method are: 'GET', 'POST', "
+ "'DELETE', or 'PUT'")
+
+ method = method.upper()
+
+ if method in ['GET', 'DELETE']:
+ if path_params is not None:
+ raise TypeError("path_params should not be used for {} "
+ "endpoints. All function params are considered"
+ " path parameters by default".format(method))
+
+ if path_params is None:
+ if method in ['POST', 'PUT']:
+ path_params = []
+
+ if query_params is None:
+ query_params = []
+
+ def _wrapper(func):
+ if method in ['POST', 'PUT']:
+ func_params = _get_function_params(func)
+ for param in func_params:
+ if param['name'] in path_params and not param['required']:
+ raise TypeError("path_params can only reference "
+ "non-optional function parameters")
+
+ if func.__name__ == '__call__' and path is None:
+ e_path = ""
+ else:
+ e_path = path
+
+ if e_path is not None:
+ e_path = e_path.strip()
+ if e_path and e_path[0] != "/":
+ e_path = "/" + e_path
+ elif e_path == "/":
+ e_path = ""
+
+ func._endpoint = {
+ 'method': method,
+ 'path': e_path,
+ 'path_params': path_params,
+ 'query_params': query_params,
+ 'json_response': json_response,
+ 'proxy': proxy,
+ 'xml': xml
+ }
+ return func
+ return _wrapper
+
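+# Usage sketch for @Endpoint (illustrative; names are made up). For GET and
+# DELETE, every function parameter is treated as a path parameter unless it
+# is listed in query_params; for POST and PUT, parameters not listed in
+# path_params or query_params are read from the request body:
+#
+#     @Endpoint('POST', path='rename', query_params=['dry_run'])
+#     def rename(self, new_name, dry_run=False):
+#         ...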
+
+def Proxy(path=None):
+ if path is None:
+ path = ""
+ elif path == "/":
+ path = ""
+ path += "/{path:.*}"
+ return Endpoint(path=path, proxy=True)
+
+
+def load_controllers():
+ # setting sys.path properly when not running under the mgr
+ controllers_dir = os.path.dirname(os.path.realpath(__file__))
+ dashboard_dir = os.path.dirname(controllers_dir)
+ mgr_dir = os.path.dirname(dashboard_dir)
+ logger.debug("LC: controllers_dir=%s", controllers_dir)
+ logger.debug("LC: dashboard_dir=%s", dashboard_dir)
+ logger.debug("LC: mgr_dir=%s", mgr_dir)
+ if mgr_dir not in sys.path:
+ sys.path.append(mgr_dir)
+
+ controllers = []
+ mods = [mod for _, mod, _ in pkgutil.iter_modules([controllers_dir])]
+ logger.debug("LC: mods=%s", mods)
+ for mod_name in mods:
+ mod = importlib.import_module('.controllers.{}'.format(mod_name),
+ package='dashboard')
+ for _, cls in mod.__dict__.items():
+ # Controllers MUST be derived from the class BaseController.
+ if inspect.isclass(cls) and issubclass(cls, BaseController) and \
+ hasattr(cls, '_cp_controller_'):
+ if cls._cp_path_.startswith(':'):
+ # invalid _cp_path_ value
+ logger.error("Invalid url prefix '%s' for controller '%s'",
+ cls._cp_path_, cls.__name__)
+ continue
+ controllers.append(cls)
+
+ for clist in PLUGIN_MANAGER.hook.get_controllers() or []:
+ controllers.extend(clist)
+
+ return controllers
+
+
+ENDPOINT_MAP = collections.defaultdict(list)
+
+
+def generate_controller_routes(endpoint, mapper, base_url):
+ inst = endpoint.inst
+ ctrl_class = endpoint.ctrl
+
+ if endpoint.proxy:
+ conditions = None
+ else:
+ conditions = dict(method=[endpoint.method])
+
+ # base_url can be empty or a URL path that starts with "/"
+    # we will remove the trailing "/" if it exists to help with the
+ # concatenation with the endpoint url below
+ if base_url.endswith("/"):
+ base_url = base_url[:-1]
+
+ endp_url = endpoint.url
+
+ if endp_url.find("/", 1) == -1:
+ parent_url = "{}{}".format(base_url, endp_url)
+ else:
+ parent_url = "{}{}".format(base_url, endp_url[:endp_url.find("/", 1)])
+
+ # parent_url might be of the form "/.../{...}" where "{...}" is a path parameter
+ # we need to remove the path parameter definition
+ parent_url = re.sub(r'(?:/\{[^}]+\})$', '', parent_url)
+ if not parent_url: # root path case
+ parent_url = "/"
+
+ url = "{}{}".format(base_url, endp_url)
+
+ logger.debug("Mapped [%s] to %s:%s restricted to %s",
+ url, ctrl_class.__name__, endpoint.action,
+ endpoint.method)
+
+ ENDPOINT_MAP[endpoint.url].append(endpoint)
+
+ name = ctrl_class.__name__ + ":" + endpoint.action
+ mapper.connect(name, url, controller=inst, action=endpoint.action,
+ conditions=conditions)
+
+ # adding route with trailing slash
+ name += "/"
+ url += "/"
+ mapper.connect(name, url, controller=inst, action=endpoint.action,
+ conditions=conditions)
+
+ return parent_url
+
+
+def generate_routes(url_prefix):
+ mapper = cherrypy.dispatch.RoutesDispatcher()
+ ctrls = load_controllers()
+
+ parent_urls = set()
+
+ endpoint_list = []
+ for ctrl in ctrls:
+ inst = ctrl()
+ for endpoint in ctrl.endpoints():
+ endpoint.inst = inst
+ endpoint_list.append(endpoint)
+
+ endpoint_list = sorted(endpoint_list, key=lambda e: e.url)
+ for endpoint in endpoint_list:
+ parent_urls.add(generate_controller_routes(endpoint, mapper,
+ "{}".format(url_prefix)))
+
+ logger.debug("list of parent paths: %s", parent_urls)
+ return mapper, parent_urls
+
+
+def json_error_page(status, message, traceback, version):
+ cherrypy.response.headers['Content-Type'] = 'application/json'
+ return json.dumps(dict(status=status, detail=message, traceback=traceback,
+ version=version))
+
+
+def _get_function_params(func):
+ """
+    Retrieves the list of parameters declared in the given function.
+ Each parameter is represented as dict with keys:
+ * name (str): the name of the parameter
+ * required (bool): whether the parameter is required or not
+ * default (obj): the parameter's default value
+ """
+ fspec = getargspec(func)
+
+ func_params = []
+ nd = len(fspec.args) if not fspec.defaults else -len(fspec.defaults)
+ for param in fspec.args[1:nd]:
+ func_params.append({'name': param, 'required': True})
+
+ if fspec.defaults:
+ for param, val in zip(fspec.args[nd:], fspec.defaults):
+ func_params.append({
+ 'name': param,
+ 'required': False,
+ 'default': val
+ })
+
+ return func_params
+
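+# Illustrative example of the returned structure (the function is made up):
+#
+#     def set(self, key, value=None): ...
+#
+#     _get_function_params(set)
+#     # -> [{'name': 'key', 'required': True},
+#     #     {'name': 'value', 'required': False, 'default': None}]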
+
+class Task(object):
+ def __init__(self, name, metadata, wait_for=5.0, exception_handler=None):
+ self.name = name
+ if isinstance(metadata, list):
+ self.metadata = {e[1:-1]: e for e in metadata}
+ else:
+ self.metadata = metadata
+ self.wait_for = wait_for
+ self.exception_handler = exception_handler
+
+ def _gen_arg_map(self, func, args, kwargs):
+ arg_map = {}
+ params = _get_function_params(func)
+
+ args = args[1:] # exclude self
+ for idx, param in enumerate(params):
+ if idx < len(args):
+ arg_map[param['name']] = args[idx]
+ else:
+ if param['name'] in kwargs:
+ arg_map[param['name']] = kwargs[param['name']]
+ else:
+ assert not param['required'], "{0} is required".format(param['name'])
+ arg_map[param['name']] = param['default']
+
+ if param['name'] in arg_map:
+ # This is not a type error. We are using the index here.
+ arg_map[idx+1] = arg_map[param['name']]
+
+ return arg_map
+
+ def __call__(self, func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ arg_map = self._gen_arg_map(func, args, kwargs)
+ metadata = {}
+ for k, v in self.metadata.items():
+ if isinstance(v, str) and v and v[0] == '{' and v[-1] == '}':
+ param = v[1:-1]
+ try:
+ pos = int(param)
+ metadata[k] = arg_map[pos]
+ except ValueError:
+ if param.find('.') == -1:
+ metadata[k] = arg_map[param]
+ else:
+ path = param.split('.')
+ metadata[k] = arg_map[path[0]]
+ for i in range(1, len(path)):
+ metadata[k] = metadata[k][path[i]]
+ else:
+ metadata[k] = v
+ task = TaskManager.run(self.name, metadata, func, args, kwargs,
+ exception_handler=self.exception_handler)
+ try:
+ status, value = task.wait(self.wait_for)
+ except Exception as ex:
+ if task.ret_value:
+ # exception was handled by task.exception_handler
+ if 'status' in task.ret_value:
+ status = task.ret_value['status']
+ else:
+ status = getattr(ex, 'status', 500)
+ cherrypy.response.status = status
+ return task.ret_value
+ raise ex
+ if status == TaskManager.VALUE_EXECUTING:
+ cherrypy.response.status = 202
+ return {'name': self.name, 'metadata': metadata}
+ return value
+ return wrapper
+
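+# Usage sketch for @Task (illustrative; the task name and arguments are made
+# up). Metadata values of the form '{param}' are resolved from the decorated
+# function's arguments; if the task is still running after `wait_for`
+# seconds, the wrapper responds with HTTP 202 and the task metadata:
+#
+#     @Task('pool/create', {'pool_name': '{name}'}, wait_for=2.0)
+#     def create(self, name, pg_num=16):
+#         ...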
+
+class BaseController(object):
+ """
+ Base class for all controllers providing API endpoints.
+ """
+
+ class Endpoint(object):
+ """
+ An instance of this class represents an endpoint.
+ """
+
+ def __init__(self, ctrl, func):
+ self.ctrl = ctrl
+ self.inst = None
+ self.func = func
+
+ if not self.config['proxy']:
+ setattr(self.ctrl, func.__name__, self.function)
+
+ @property
+ def config(self):
+ func = self.func
+ while not hasattr(func, '_endpoint'):
+ if hasattr(func, "__wrapped__"):
+ func = func.__wrapped__
+ else:
+ return None
+ return func._endpoint
+
+ @property
+ def function(self):
+ return self.ctrl._request_wrapper(self.func, self.method,
+ self.config['json_response'],
+ self.config['xml'])
+
+ @property
+ def method(self):
+ return self.config['method']
+
+ @property
+ def proxy(self):
+ return self.config['proxy']
+
+ @property
+ def url(self):
+ ctrl_path = self.ctrl.get_path()
+ if ctrl_path == "/":
+ ctrl_path = ""
+ if self.config['path'] is not None:
+ url = "{}{}".format(ctrl_path, self.config['path'])
+ else:
+ url = "{}/{}".format(ctrl_path, self.func.__name__)
+
+ ctrl_path_params = self.ctrl.get_path_param_names(
+ self.config['path'])
+ path_params = [p['name'] for p in self.path_params
+ if p['name'] not in ctrl_path_params]
+ path_params = ["{{{}}}".format(p) for p in path_params]
+ if path_params:
+ url += "/{}".format("/".join(path_params))
+
+ return url
+
+ @property
+ def action(self):
+ return self.func.__name__
+
+ @property
+ def path_params(self):
+ ctrl_path_params = self.ctrl.get_path_param_names(
+ self.config['path'])
+ func_params = _get_function_params(self.func)
+
+ if self.method in ['GET', 'DELETE']:
+ assert self.config['path_params'] is None
+
+ return [p for p in func_params if p['name'] in ctrl_path_params
+ or (p['name'] not in self.config['query_params']
+ and p['required'])]
+
+ # elif self.method in ['POST', 'PUT']:
+ return [p for p in func_params if p['name'] in ctrl_path_params
+ or p['name'] in self.config['path_params']]
+
+ @property
+ def query_params(self):
+ if self.method in ['GET', 'DELETE']:
+ func_params = _get_function_params(self.func)
+ path_params = [p['name'] for p in self.path_params]
+ return [p for p in func_params if p['name'] not in path_params]
+
+ # elif self.method in ['POST', 'PUT']:
+ func_params = _get_function_params(self.func)
+ return [p for p in func_params
+ if p['name'] in self.config['query_params']]
+
+ @property
+ def body_params(self):
+ func_params = _get_function_params(self.func)
+ path_params = [p['name'] for p in self.path_params]
+ query_params = [p['name'] for p in self.query_params]
+ return [p for p in func_params
+ if p['name'] not in path_params
+ and p['name'] not in query_params]
+
+ @property
+ def group(self):
+ return self.ctrl.__name__
+
+ @property
+ def is_api(self):
+ return hasattr(self.ctrl, '_api_endpoint')
+
+ @property
+ def is_secure(self):
+ return self.ctrl._cp_config['tools.authenticate.on']
+
+ def __repr__(self):
+ return "Endpoint({}, {}, {})".format(self.url, self.method,
+ self.action)
+
+ def __init__(self):
+ logger.info('Initializing controller: %s -> %s',
+ self.__class__.__name__, self._cp_path_)
+ super(BaseController, self).__init__()
+
+ def _has_permissions(self, permissions, scope=None):
+ if not self._cp_config['tools.authenticate.on']:
+ raise Exception("Cannot verify permission in non secured "
+ "controllers")
+
+ if not isinstance(permissions, list):
+ permissions = [permissions]
+
+ if scope is None:
+ scope = getattr(self, '_security_scope', None)
+ if scope is None:
+ raise Exception("Cannot verify permissions without scope security"
+ " defined")
+ username = JwtManager.LOCAL_USER.username
+ return AuthManager.authorize(username, scope, permissions)
+
+ @classmethod
+ def get_path_param_names(cls, path_extension=None):
+ if path_extension is None:
+ path_extension = ""
+ full_path = cls._cp_path_[1:] + path_extension
+ path_params = []
+ for step in full_path.split('/'):
+ param = None
+ if not step:
+ continue
+ if step[0] == ':':
+ param = step[1:]
+ elif step[0] == '{' and step[-1] == '}':
+ param, _, _ = step[1:-1].partition(':')
+ if param:
+ path_params.append(param)
+ return path_params
+
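+    # Illustrative example for get_path_param_names (made-up values):
+    #   cls._cp_path_ = '/pool/{pool_name}'
+    #   cls.get_path_param_names('/{var}')  # -> ['pool_name', 'var']
+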
+ @classmethod
+ def get_path(cls):
+ return cls._cp_path_
+
+ @classmethod
+ def endpoints(cls):
+ """
+        This method iterates over all the methods decorated with ``@Endpoint``
+ and creates an Endpoint object for each one of the methods.
+
+ :return: A list of endpoint objects
+ :rtype: list[BaseController.Endpoint]
+ """
+ result = []
+ for _, func in inspect.getmembers(cls, predicate=callable):
+ if hasattr(func, '_endpoint'):
+ result.append(cls.Endpoint(cls, func))
+ return result
+
+ @staticmethod
+ def _request_wrapper(func, method, json_response, xml): # pylint: disable=unused-argument
+ @wraps(func)
+ def inner(*args, **kwargs):
+ for key, value in kwargs.items():
+ # pylint: disable=undefined-variable
+ if (sys.version_info < (3, 0) and isinstance(value, unicode)) \
+ or isinstance(value, str):
+ kwargs[key] = unquote(value)
+
+ # Process method arguments.
+ params = get_request_body_params(cherrypy.request)
+ kwargs.update(params)
+
+ ret = func(*args, **kwargs)
+ if isinstance(ret, bytes):
+ ret = ret.decode('utf-8')
+ if xml:
+ cherrypy.response.headers['Content-Type'] = 'application/xml'
+ return ret.encode('utf8')
+ if json_response:
+ cherrypy.response.headers['Content-Type'] = 'application/json'
+ ret = json.dumps(ret).encode('utf8')
+ return ret
+ return inner
+
+ @property
+ def _request(self):
+ return self.Request(cherrypy.request)
+
+ class Request(object):
+ def __init__(self, cherrypy_req):
+ self._creq = cherrypy_req
+
+ @property
+ def scheme(self):
+ return self._creq.scheme
+
+ @property
+ def host(self):
+ base = self._creq.base
+ base = base[len(self.scheme)+3:]
+ return base[:base.find(":")] if ":" in base else base
+
+ @property
+ def port(self):
+ base = self._creq.base
+ base = base[len(self.scheme)+3:]
+ default_port = 443 if self.scheme == 'https' else 80
+ return int(base[base.find(":")+1:]) if ":" in base else default_port
+
+ @property
+ def path_info(self):
+ return self._creq.path_info
+
+
+class RESTController(BaseController):
+ """
+ Base class for providing a RESTful interface to a resource.
+
+ To use this class, simply derive a class from it and implement the methods
+    you want to support. The possible methods are:
+
+ * list()
+ * bulk_set(data)
+ * create(data)
+ * bulk_delete()
+ * get(key)
+ * set(data, key)
+ * delete(key)
+
+ Test with curl:
+
+ curl -H "Content-Type: application/json" -X POST \
+ -d '{"username":"xyz","password":"xyz"}' https://127.0.0.1:8443/foo
+ curl https://127.0.0.1:8443/foo
+ curl https://127.0.0.1:8443/foo/0
+
+ """
+
+    # Resource id parameter used in the get, set, and delete methods;
+    # should be overridden by subclasses.
+    # To specify a composite id (two parameters) use '/', e.g. "param1/param2".
+    # If subclasses don't override this property, we try to infer the
+    # structure of the resource ID.
+ RESOURCE_ID = None
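+    # Illustrative example (made-up subclass): for URLs of the form
+    # /api/foo/{param1}/{param2}, a subclass would declare:
+    #   RESOURCE_ID = "param1/param2"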
+
+ _permission_map = {
+ 'GET': Permission.READ,
+ 'POST': Permission.CREATE,
+ 'PUT': Permission.UPDATE,
+ 'DELETE': Permission.DELETE
+ }
+
+ _method_mapping = collections.OrderedDict([
+ ('list', {'method': 'GET', 'resource': False, 'status': 200}),
+ ('create', {'method': 'POST', 'resource': False, 'status': 201}),
+ ('bulk_set', {'method': 'PUT', 'resource': False, 'status': 200}),
+ ('bulk_delete', {'method': 'DELETE', 'resource': False, 'status': 204}),
+ ('get', {'method': 'GET', 'resource': True, 'status': 200}),
+ ('delete', {'method': 'DELETE', 'resource': True, 'status': 204}),
+ ('set', {'method': 'PUT', 'resource': True, 'status': 200})
+ ])
+
+ @classmethod
+ def infer_resource_id(cls):
+ if cls.RESOURCE_ID is not None:
+ return cls.RESOURCE_ID.split('/')
+ for k, v in cls._method_mapping.items():
+ func = getattr(cls, k, None)
+ while hasattr(func, "__wrapped__"):
+ func = func.__wrapped__
+ if v['resource'] and func:
+ path_params = cls.get_path_param_names()
+ params = _get_function_params(func)
+ return [p['name'] for p in params
+ if p['required'] and p['name'] not in path_params]
+ return None
+
+ @classmethod
+ def endpoints(cls):
+ result = super(RESTController, cls).endpoints()
+ res_id_params = cls.infer_resource_id()
+
+ for _, func in inspect.getmembers(cls, predicate=callable):
+ no_resource_id_params = False
+ status = 200
+ method = None
+ query_params = None
+ path = ""
+ sec_permissions = hasattr(func, '_security_permissions')
+ permission = None
+
+ if func.__name__ in cls._method_mapping:
+ meth = cls._method_mapping[func.__name__]
+
+ if meth['resource']:
+ if not res_id_params:
+ no_resource_id_params = True
+ else:
+ path_params = ["{{{}}}".format(p) for p in res_id_params]
+ path += "/{}".format("/".join(path_params))
+
+ status = meth['status']
+ method = meth['method']
+ if not sec_permissions:
+ permission = cls._permission_map[method]
+
+ elif hasattr(func, "_collection_method_"):
+ if func._collection_method_['path']:
+ path = func._collection_method_['path']
+ else:
+ path = "/{}".format(func.__name__)
+ status = func._collection_method_['status']
+ method = func._collection_method_['method']
+ query_params = func._collection_method_['query_params']
+ if not sec_permissions:
+ permission = cls._permission_map[method]
+
+ elif hasattr(func, "_resource_method_"):
+ if not res_id_params:
+ no_resource_id_params = True
+ else:
+ path_params = ["{{{}}}".format(p) for p in res_id_params]
+ path += "/{}".format("/".join(path_params))
+ if func._resource_method_['path']:
+ path += func._resource_method_['path']
+ else:
+ path += "/{}".format(func.__name__)
+ status = func._resource_method_['status']
+ method = func._resource_method_['method']
+ query_params = func._resource_method_['query_params']
+ if not sec_permissions:
+ permission = cls._permission_map[method]
+
+ else:
+ continue
+
+ if no_resource_id_params:
+ raise TypeError("Could not infer the resource ID parameters for"
+ " method {} of controller {}. "
+ "Please specify the resource ID parameters "
+ "using the RESOURCE_ID class property"
+ .format(func.__name__, cls.__name__))
+
+ if method in ['GET', 'DELETE']:
+ params = _get_function_params(func)
+ if res_id_params is None:
+ res_id_params = []
+ if query_params is None:
+ query_params = [p['name'] for p in params
+ if p['name'] not in res_id_params]
+
+ func = cls._status_code_wrapper(func, status)
+ endp_func = Endpoint(method, path=path,
+ query_params=query_params)(func)
+ if permission:
+ _set_func_permissions(endp_func, [permission])
+ result.append(cls.Endpoint(cls, endp_func))
+
+ return result
+
+ @classmethod
+ def _status_code_wrapper(cls, func, status_code):
+ @wraps(func)
+ def wrapper(*vpath, **params):
+ cherrypy.response.status = status_code
+ return func(*vpath, **params)
+
+ return wrapper
+
+ @staticmethod
+ def Resource(method=None, path=None, status=None, query_params=None):
+ if not method:
+ method = 'GET'
+
+ if status is None:
+ status = 200
+
+ def _wrapper(func):
+ func._resource_method_ = {
+ 'method': method,
+ 'path': path,
+ 'status': status,
+ 'query_params': query_params
+ }
+ return func
+ return _wrapper
+
+ @staticmethod
+ def Collection(method=None, path=None, status=None, query_params=None):
+ if not method:
+ method = 'GET'
+
+ if status is None:
+ status = 200
+
+ def _wrapper(func):
+ func._collection_method_ = {
+ 'method': method,
+ 'path': path,
+ 'status': status,
+ 'query_params': query_params
+ }
+ return func
+ return _wrapper
+
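+# Usage sketch for Resource/Collection (illustrative; the controller and
+# methods are made up). Resource methods are routed under the resource ID,
+# collection methods under the collection itself:
+#
+#     @ApiController('/widget', Scope.POOL)
+#     class Widget(RESTController):
+#         RESOURCE_ID = 'name'
+#
+#         @RESTController.Resource('POST', path='/activate')
+#         def activate(self, name):  # POST /api/widget/{name}/activate
+#             ...
+#
+#         @RESTController.Collection('GET')
+#         def stats(self):  # GET /api/widget/stats
+#             ...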
+
+# Role-based access permissions decorators
+
+def _set_func_permissions(func, permissions):
+ if not isinstance(permissions, list):
+ permissions = [permissions]
+
+ for perm in permissions:
+ if not Permission.valid_permission(perm):
+ logger.debug("Invalid security permission: %s\n "
+ "Possible values: %s", perm,
+ Permission.all_permissions())
+ raise PermissionNotValid(perm)
+
+ if not hasattr(func, '_security_permissions'):
+ func._security_permissions = permissions
+ else:
+ permissions.extend(func._security_permissions)
+ func._security_permissions = list(set(permissions))
+
+
+def ReadPermission(func):
+ _set_func_permissions(func, Permission.READ)
+ return func
+
+
+def CreatePermission(func):
+ _set_func_permissions(func, Permission.CREATE)
+ return func
+
+
+def DeletePermission(func):
+ _set_func_permissions(func, Permission.DELETE)
+ return func
+
+
+def UpdatePermission(func):
+ _set_func_permissions(func, Permission.UPDATE)
+ return func
+
+
+# Empty request body decorator
+
+def allow_empty_body(func): # noqa: N802
+ """
+ The POST/PUT request methods decorated with ``@allow_empty_body``
+ are allowed to send empty request body.
+ """
+ try:
+ func._cp_config['tools.json_in.force'] = False
+ except (AttributeError, KeyError):
+ func._cp_config = {'tools.json_in.force': False}
+ return func
+
+
+def set_cookies(url_prefix, token):
+ cherrypy.response.cookie['token'] = token
+ if url_prefix == 'https':
+ cherrypy.response.cookie['token']['secure'] = True
+ cherrypy.response.cookie['token']['HttpOnly'] = True
+ cherrypy.response.cookie['token']['path'] = '/'
+ cherrypy.response.cookie['token']['SameSite'] = 'Strict'
diff --git a/src/pybind/mgr/dashboard/controllers/auth.py b/src/pybind/mgr/dashboard/controllers/auth.py
new file mode 100644
index 00000000..51139bef
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/auth.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+try:
+ import Cookie
+except ImportError:
+ import http.cookies as Cookie
+import sys
+import jwt
+
+from . import ApiController, RESTController, \
+ allow_empty_body, set_cookies
+from .. import logger, mgr
+from ..exceptions import DashboardException
+from ..services.auth import AuthManager, JwtManager
+from ..services.access_control import UserDoesNotExist
+# Python 3.8 introduced `samesite` attribute:
+# https://docs.python.org/3/library/http.cookies.html#morsel-objects
+if sys.version_info < (3, 8):
+ Cookie.Morsel._reserved["samesite"] = "SameSite" # type: ignore # pylint: disable=W0212
+
+
+@ApiController('/auth', secure=False)
+class Auth(RESTController):
+ """
+    Provides authentication and returns a JWT token.
+ """
+
+ def create(self, username, password):
+ user_perms = AuthManager.authenticate(username, password)
+ if user_perms is not None:
+ url_prefix = 'https' if mgr.get_localized_module_option('ssl') else 'http'
+ logger.debug('Login successful')
+ token = JwtManager.gen_token(username)
+
+ # For backward-compatibility: PyJWT versions < 2.0.0 return bytes.
+ token = token.decode('utf-8') if isinstance(token, bytes) else token
+
+ set_cookies(url_prefix, token)
+ return {
+ 'token': token,
+ 'username': username,
+ 'permissions': user_perms
+ }
+
+ logger.debug('Login failed')
+ raise DashboardException(msg='Invalid credentials',
+ code='invalid_credentials',
+ component='auth')
+
+ @RESTController.Collection('POST')
+ @allow_empty_body
+ def logout(self):
+ logger.debug('Logout successful')
+ token = JwtManager.get_token_from_header()
+ JwtManager.blacklist_token(token)
+ redirect_url = '#/login'
+ if mgr.SSO_DB.protocol == 'saml2':
+ redirect_url = 'auth/saml2/slo'
+ return {
+ 'redirect_url': redirect_url
+ }
+
+ def _get_login_url(self):
+ if mgr.SSO_DB.protocol == 'saml2':
+ return 'auth/saml2/login'
+ return '#/login'
+
+ @RESTController.Collection('POST')
+ def check(self, token):
+ if token:
+ try:
+ token = JwtManager.decode_token(token)
+ if not JwtManager.is_blacklisted(token['jti']):
+ user = AuthManager.get_user(token['username'])
+ if user.lastUpdate <= token['iat']:
+ return {
+ 'username': user.username,
+ 'permissions': user.permissions_dict(),
+ }
+
+ logger.debug("AMT: user info changed after token was"
+ " issued, iat=%s lastUpdate=%s",
+ token['iat'], user.lastUpdate)
+ else:
+ logger.debug('AMT: Token is black-listed')
+ except jwt.exceptions.ExpiredSignatureError:
+ logger.debug("AMT: Token has expired")
+ except jwt.exceptions.InvalidTokenError:
+ logger.debug("AMT: Failed to decode token")
+ except UserDoesNotExist:
+ logger.debug("AMT: Invalid token: user %s does not exist",
+ token['username'])
+ return {
+ 'login_url': self._get_login_url()
+ }
diff --git a/src/pybind/mgr/dashboard/controllers/cephfs.py b/src/pybind/mgr/dashboard/controllers/cephfs.py
new file mode 100644
index 00000000..788a865a
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/cephfs.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from collections import defaultdict
+
+import cherrypy
+
+from . import ApiController, RESTController
+from .. import mgr
+from ..exceptions import DashboardException
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..tools import ViewCache
+
+
+@ApiController('/cephfs', Scope.CEPHFS)
+class CephFS(RESTController):
+ def __init__(self):
+ super(CephFS, self).__init__()
+
+        # Stateful instances of CephFSClients hold cached results. The key
+        # to the dict is the FSCID.
+ self.cephfs_clients = {}
+
+ def list(self):
+ fsmap = mgr.get("fs_map")
+ return fsmap['filesystems']
+
+ def get(self, fs_id):
+ fs_id = self.fs_id_to_int(fs_id)
+
+ return self.fs_status(fs_id)
+
+ @RESTController.Resource('GET')
+ def clients(self, fs_id):
+ fs_id = self.fs_id_to_int(fs_id)
+
+ return self._clients(fs_id)
+
+ @RESTController.Resource('GET')
+ def mds_counters(self, fs_id):
+ """
+ Result format: map of daemon name to map of counter to list of datapoints
+ rtype: dict[str, dict[str, list]]
+ """
+
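+        # Illustrative result shape (daemon names and values are made up):
+        #   {"<daemon>": {"mds_mem.ino": [[<timestamp>, <value>], ...], ...}}
+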
+ # Opinionated list of interesting performance counters for the GUI --
+ # if you need something else just add it. See how simple life is
+ # when you don't have to write general purpose APIs?
+ counters = [
+ "mds_server.handle_client_request",
+ "mds_log.ev",
+ "mds_cache.num_strays",
+ "mds.exported",
+ "mds.exported_inodes",
+ "mds.imported",
+ "mds.imported_inodes",
+ "mds.inodes",
+ "mds.caps",
+ "mds.subtrees",
+ "mds_mem.ino"
+ ]
+
+ fs_id = self.fs_id_to_int(fs_id)
+
+ result = {}
+ mds_names = self._get_mds_names(fs_id)
+
+ for mds_name in mds_names:
+ result[mds_name] = {}
+ for counter in counters:
+ data = mgr.get_counter("mds", mds_name, counter)
+ if data is not None:
+ result[mds_name][counter] = data[counter]
+ else:
+ result[mds_name][counter] = []
+
+ return dict(result)
+
+ @staticmethod
+ def fs_id_to_int(fs_id):
+ try:
+ return int(fs_id)
+ except ValueError:
+ raise DashboardException(code='invalid_cephfs_id',
+ msg="Invalid cephfs ID {}".format(fs_id),
+ component='cephfs')
+
+ def _get_mds_names(self, filesystem_id=None):
+ names = []
+
+ fsmap = mgr.get("fs_map")
+ for fs in fsmap['filesystems']:
+ if filesystem_id is not None and fs['id'] != filesystem_id:
+ continue
+ names.extend([info['name']
+ for _, info in fs['mdsmap']['info'].items()])
+
+ if filesystem_id is None:
+ names.extend(info['name'] for info in fsmap['standbys'])
+
+ return names
+
+ def _append_mds_metadata(self, mds_versions, metadata_key):
+ metadata = mgr.get_metadata('mds', metadata_key)
+ if metadata is None:
+ return
+ mds_versions[metadata.get('ceph_version', 'unknown')].append(metadata_key)
+
+ # pylint: disable=too-many-statements,too-many-branches
+ def fs_status(self, fs_id):
+ mds_versions = defaultdict(list)
+
+ fsmap = mgr.get("fs_map")
+ filesystem = None
+ for fs in fsmap['filesystems']:
+ if fs['id'] == fs_id:
+ filesystem = fs
+ break
+
+ if filesystem is None:
+ raise cherrypy.HTTPError(404,
+ "CephFS id {0} not found".format(fs_id))
+
+ rank_table = []
+
+ mdsmap = filesystem['mdsmap']
+
+ client_count = 0
+
+ for rank in mdsmap["in"]:
+ up = "mds_{0}".format(rank) in mdsmap["up"]
+ if up:
+ gid = mdsmap['up']["mds_{0}".format(rank)]
+ info = mdsmap['info']['gid_{0}'.format(gid)]
+ dns = mgr.get_latest("mds", info['name'], "mds_mem.dn")
+ inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")
+
+ if rank == 0:
+ client_count = mgr.get_latest("mds", info['name'],
+ "mds_sessions.session_count")
+ elif client_count == 0:
+ # In case rank 0 was down, look at another rank's
+ # sessionmap to get an indication of clients.
+ client_count = mgr.get_latest("mds", info['name'],
+ "mds_sessions.session_count")
+
+ laggy = "laggy_since" in info
+
+ state = info['state'].split(":")[1]
+ if laggy:
+ state += "(laggy)"
+
+ # Populate based on context of state, e.g. client
+ # ops for an active daemon, replay progress, reconnect
+ # progress
+ if state == "active":
+ activity = CephService.get_rate("mds",
+ info['name'],
+ "mds_server.handle_client_request")
+ else:
+ activity = 0.0
+
+ self._append_mds_metadata(mds_versions, info['name'])
+ rank_table.append(
+ {
+ "rank": rank,
+ "state": state,
+ "mds": info['name'],
+ "activity": activity,
+ "dns": dns,
+ "inos": inos
+ }
+ )
+
+ else:
+ rank_table.append(
+ {
+ "rank": rank,
+ "state": "failed",
+ "mds": "",
+ "activity": 0.0,
+ "dns": 0,
+ "inos": 0
+ }
+ )
+
+ # Find the standby replays
+ # pylint: disable=unused-variable
+ for gid_str, daemon_info in mdsmap['info'].items():
+ if daemon_info['state'] != "up:standby-replay":
+ continue
+
+ inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
+ dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")
+
+ activity = CephService.get_rate(
+ "mds", daemon_info['name'], "mds_log.replay")
+
+ rank_table.append(
+ {
+ "rank": "{0}-s".format(daemon_info['rank']),
+ "state": "standby-replay",
+ "mds": daemon_info['name'],
+ "activity": activity,
+ "dns": dns,
+ "inos": inos
+ }
+ )
+
+ df = mgr.get("df")
+ pool_stats = {p['id']: p['stats'] for p in df['pools']}
+ osdmap = mgr.get("osd_map")
+ pools = {p['pool']: p for p in osdmap['pools']}
+ metadata_pool_id = mdsmap['metadata_pool']
+ data_pool_ids = mdsmap['data_pools']
+
+ pools_table = []
+ for pool_id in [metadata_pool_id] + data_pool_ids:
+ pool_type = "metadata" if pool_id == metadata_pool_id else "data"
+ stats = pool_stats[pool_id]
+ pools_table.append({
+ "pool": pools[pool_id]['pool_name'],
+ "type": pool_type,
+ "used": stats['bytes_used'],
+ "avail": stats['max_avail']
+ })
+
+ standby_table = []
+ for standby in fsmap['standbys']:
+ self._append_mds_metadata(mds_versions, standby['name'])
+ standby_table.append({
+ 'name': standby['name']
+ })
+
+ return {
+ "cephfs": {
+ "id": fs_id,
+ "name": mdsmap['fs_name'],
+ "client_count": client_count,
+ "ranks": rank_table,
+ "pools": pools_table
+ },
+ "standbys": standby_table,
+ "versions": mds_versions
+ }
+
+ def _clients(self, fs_id):
+ cephfs_clients = self.cephfs_clients.get(fs_id, None)
+ if cephfs_clients is None:
+ cephfs_clients = CephFSClients(mgr, fs_id)
+ self.cephfs_clients[fs_id] = cephfs_clients
+
+ try:
+ status, clients = cephfs_clients.get()
+ except AttributeError:
+ raise cherrypy.HTTPError(404,
+ "No cephfs with id {0}".format(fs_id))
+
+ if clients is None:
+ raise cherrypy.HTTPError(404,
+ "No cephfs with id {0}".format(fs_id))
+
+ # Decorate the metadata with some fields that will be
+        # independent of whether it's a kernel or userspace
+ # client, so that the javascript doesn't have to grok that.
+ for client in clients:
+ if "ceph_version" in client['client_metadata']:
+ client['type'] = "userspace"
+ client['version'] = client['client_metadata']['ceph_version']
+ client['hostname'] = client['client_metadata']['hostname']
+ elif "kernel_version" in client['client_metadata']:
+ client['type'] = "kernel"
+ client['version'] = client['client_metadata']['kernel_version']
+ client['hostname'] = client['client_metadata']['hostname']
+ else:
+ client['type'] = "unknown"
+ client['version'] = ""
+ client['hostname'] = ""
+
+ return {
+ 'status': status,
+ 'data': clients
+ }
+
+
+class CephFSClients(object):
+ def __init__(self, module_inst, fscid):
+ self._module = module_inst
+ self.fscid = fscid
+
+ @ViewCache()
+ def get(self):
+ return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
diff --git a/src/pybind/mgr/dashboard/controllers/cluster_configuration.py b/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
new file mode 100644
index 00000000..8401b09e
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import cherrypy
+
+from . import ApiController, RESTController
+from .. import mgr
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..exceptions import DashboardException
+
+
+@ApiController('/cluster_conf', Scope.CONFIG_OPT)
+class ClusterConfiguration(RESTController):
+
+ def _append_config_option_values(self, options):
+ """
+ Appends values from the config database (if available) to the given options
+ :param options: list of config options
+ :return: list of config options extended by their current values
+ """
+ config_dump = CephService.send_command('mon', 'config dump')
+ for config_dump_entry in config_dump:
+ for i, elem in enumerate(options):
+ if config_dump_entry['name'] == elem['name']:
+ if 'value' not in elem:
+ options[i]['value'] = []
+ options[i]['source'] = 'mon'
+
+ options[i]['value'].append({'section': config_dump_entry['section'],
+ 'value': config_dump_entry['value']})
+ return options
+
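+    # Illustrative shape of one extended option (values are made up):
+    #   {'name': 'mon_allow_pool_delete', 'can_update_at_runtime': True,
+    #    'source': 'mon', 'value': [{'section': 'global', 'value': 'true'}]}
+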
+ def list(self):
+ options = mgr.get('config_options')['options']
+ return self._append_config_option_values(options)
+
+ def get(self, name):
+ return self._get_config_option(name)
+
+ @RESTController.Collection('GET', query_params=['name'])
+ def filter(self, names=None):
+ config_options = []
+
+ if names:
+ for name in names.split(','):
+ try:
+ config_options.append(self._get_config_option(name))
+ except cherrypy.HTTPError:
+ pass
+
+ if not config_options:
+ raise cherrypy.HTTPError(404, 'Config options `{}` not found'.format(names))
+
+ return config_options
+
+ def create(self, name, value):
+ # Check if config option is updateable at runtime
+ self._updateable_at_runtime([name])
+
+ # Update config option
+        avail_sections = ['global', 'mon', 'mgr', 'osd', 'mds', 'client']
+
+        for section in avail_sections:
+ for entry in value:
+ if entry['value'] is None:
+ break
+
+ if entry['section'] == section:
+ CephService.send_command('mon', 'config set', who=section, name=name,
+ value=str(entry['value']))
+ break
+ else:
+ CephService.send_command('mon', 'config rm', who=section, name=name)
+
+ def delete(self, name, section):
+ return CephService.send_command('mon', 'config rm', who=section, name=name)
+
+ def bulk_set(self, options):
+ self._updateable_at_runtime(options.keys())
+
+ for name, value in options.items():
+ CephService.send_command('mon', 'config set', who=value['section'],
+ name=name, value=str(value['value']))
+
+ def _get_config_option(self, name):
+ for option in mgr.get('config_options')['options']:
+ if option['name'] == name:
+ return self._append_config_option_values([option])[0]
+
+ raise cherrypy.HTTPError(404)
+
+ def _updateable_at_runtime(self, config_option_names):
+ not_updateable = []
+
+ for name in config_option_names:
+ config_option = self._get_config_option(name)
+ if not config_option['can_update_at_runtime']:
+ not_updateable.append(name)
+
+ if not_updateable:
+ raise DashboardException(
+                msg='Config option(s) {} cannot be updated at runtime'.format(
+ ', '.join(not_updateable)),
+ code='config_option_not_updatable_at_runtime',
+ component='cluster_configuration')
diff --git a/src/pybind/mgr/dashboard/controllers/docs.py b/src/pybind/mgr/dashboard/controllers/docs.py
new file mode 100644
index 00000000..70bea7c4
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/docs.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import cherrypy
+
+from . import Controller, BaseController, Endpoint, ENDPOINT_MAP
+from .. import logger, mgr
+
+from ..tools import str_to_bool
+
+
+@Controller('/docs', secure=False)
+class Docs(BaseController):
+
+ @classmethod
+ def _gen_tags(cls, all_endpoints):
+ """ Generates a list of all tags and corresponding descriptions. """
+ # Scenarios to consider:
+ # * Intentionally make up a new tag name at controller => New tag name displayed.
+        # * Misspell or make up a new tag name at endpoint => Neither tag nor endpoint displayed.
+ # * Misspell tag name at controller (when referring to another controller) =>
+ # Tag displayed but no endpoints assigned
+ # * Description for a tag added at multiple locations => Only one description displayed.
+ list_of_ctrl = set()
+ for endpoints in ENDPOINT_MAP.values():
+ for endpoint in endpoints:
+ if endpoint.is_api or all_endpoints:
+ list_of_ctrl.add(endpoint.ctrl)
+
+        tag_map = {}
+        for ctrl in list_of_ctrl:
+            tag_name = ctrl.__name__
+            tag_descr = ""
+            if hasattr(ctrl, 'doc_info'):
+                if ctrl.doc_info['tag']:
+                    tag_name = ctrl.doc_info['tag']
+                tag_descr = ctrl.doc_info['tag_descr']
+            if tag_name not in tag_map or not tag_map[tag_name]:
+                tag_map[tag_name] = tag_descr
+
+        tags = [{'name': k, 'description': v if v else "*No description available*"}
+                for k, v in tag_map.items()]
+ tags.sort(key=lambda e: e['name'])
+ return tags
+
+ @classmethod
+ def _get_tag(cls, endpoint):
+ """ Returns the name of a tag to assign to a path. """
+ ctrl = endpoint.ctrl
+ func = endpoint.func
+ tag = ctrl.__name__
+ if hasattr(func, 'doc_info') and func.doc_info['tag']:
+ tag = func.doc_info['tag']
+ elif hasattr(ctrl, 'doc_info') and ctrl.doc_info['tag']:
+ tag = ctrl.doc_info['tag']
+ return tag
+
+ @classmethod
+ def _gen_type(cls, param):
+ # pylint: disable=too-many-return-statements
+ """
+ Generates the type of parameter based on its name and default value,
+ using very simple heuristics.
+ Used if type is not explicitly defined.
+ """
+ param_name = param['name']
+ def_value = param['default'] if 'default' in param else None
+ if param_name.startswith("is_"):
+ return "boolean"
+ if "size" in param_name:
+ return "integer"
+ if "count" in param_name:
+ return "integer"
+ if "num" in param_name:
+ return "integer"
+ if isinstance(def_value, bool):
+ return "boolean"
+ if isinstance(def_value, int):
+ return "integer"
+ return "string"
+
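+    # Illustrative examples for _gen_type (made-up parameter dicts):
+    #   {'name': 'is_default'} -> "boolean"; {'name': 'pg_num'} -> "integer"
+    #   {'name': 'size'} -> "integer";       {'name': 'pool'} -> "string"
+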
+ @classmethod
+ # isinstance doesn't work: input is always <type 'type'>.
+ def _type_to_str(cls, type_as_type):
+ """ Used if type is explicitly defined. """
+ if type_as_type is str:
+ type_as_str = 'string'
+ elif type_as_type is int:
+ type_as_str = 'integer'
+ elif type_as_type is bool:
+ type_as_str = 'boolean'
+ elif type_as_type is list or type_as_type is tuple:
+ type_as_str = 'array'
+ elif type_as_type is float:
+ type_as_str = 'number'
+ else:
+ type_as_str = 'object'
+ return type_as_str
+
+ @classmethod
+ def _add_param_info(cls, parameters, p_info):
+ # Cases to consider:
+ # * Parameter name (if not nested) misspelt in decorator => parameter not displayed
+ # * Sometimes a parameter is used for several endpoints (e.g. fs_id in CephFS).
+ # Currently, there is no possibility of reuse. Should there be?
+ # But what if there are two parameters with same name but different functionality?
+ """
+ Adds explicitly described information for parameters of an endpoint.
+
+ There are two cases:
+ * Either the parameter in p_info corresponds to an endpoint parameter. Implicit information
+ has higher priority, so only information that doesn't already exist is added.
+ * Or the parameter in p_info describes a nested parameter inside an endpoint parameter.
+ In that case there is no implicit information at all so all explicitly described info needs
+ to be added.
+ """
+ for p in p_info:
+ if not p['nested']:
+ for parameter in parameters:
+ if p['name'] == parameter['name']:
+ parameter['type'] = p['type']
+ parameter['description'] = p['description']
+ if 'nested_params' in p:
+ parameter['nested_params'] = cls._add_param_info([], p['nested_params'])
+ else:
+ nested_p = {
+ 'name': p['name'],
+ 'type': p['type'],
+ 'description': p['description'],
+ 'required': p['required'],
+ }
+ if 'default' in p:
+ nested_p['default'] = p['default']
+ if 'nested_params' in p:
+ nested_p['nested_params'] = cls._add_param_info([], p['nested_params'])
+ parameters.append(nested_p)
+
+ return parameters
+
+ @classmethod
+ def _gen_schema_for_content(cls, params):
+ """
+        Generates information for the content object in the OpenAPI spec.
+        Used for request bodies and responses.
+ """
+ required_params = []
+ properties = {}
+
+ for param in params:
+ if param['required']:
+ required_params.append(param['name'])
+
+ props = {}
+ if 'type' in param:
+ props['type'] = cls._type_to_str(param['type'])
+ if 'nested_params' in param:
+ if props['type'] == 'array': # dict in array
+ props['items'] = cls._gen_schema_for_content(param['nested_params'])
+ else: # dict in dict
+ props = cls._gen_schema_for_content(param['nested_params'])
+ elif props['type'] == 'object': # e.g. [int]
+ props['type'] = 'array'
+ props['items'] = {'type': cls._type_to_str(param['type'][0])}
+ else:
+ props['type'] = cls._gen_type(param)
+ if 'description' in param:
+ props['description'] = param['description']
+ if 'default' in param:
+ props['default'] = param['default']
+ properties[param['name']] = props
+
+ schema = {
+ 'type': 'object',
+ 'properties': properties,
+ }
+ if required_params:
+ schema['required'] = required_params
+ return schema
+
+ @classmethod
+ def _gen_responses(cls, method, resp_object=None):
+ resp = {
+ '400': {
+ "description": "Operation exception. Please check the "
+ "response body for details."
+ },
+ '401': {
+ "description": "Unauthenticated access. Please login first."
+ },
+ '403': {
+ "description": "Unauthorized access. Please check your "
+ "permissions."
+ },
+ '500': {
+ "description": "Unexpected error. Please check the "
+ "response body for the stack trace."
+ }
+ }
+ if method.lower() == 'get':
+ resp['200'] = {'description': "OK"}
+ if method.lower() == 'post':
+ resp['201'] = {'description': "Resource created."}
+ if method.lower() == 'put':
+ resp['200'] = {'description': "Resource updated."}
+ if method.lower() == 'delete':
+ resp['204'] = {'description': "Resource deleted."}
+ if method.lower() in ['post', 'put', 'delete']:
+ resp['202'] = {'description': "Operation is still executing."
+ " Please check the task queue."}
+
+ if resp_object:
+ for status_code, response_body in resp_object.items():
+ resp[status_code].update({
+ 'content': {
+ 'application/json': {
+ 'schema': cls._gen_schema_for_content(response_body)}}})
+
+ return resp
+
+ @classmethod
+ def _gen_params(cls, params, location):
+ parameters = []
+ for param in params:
+ if 'type' in param:
+ _type = cls._type_to_str(param['type'])
+ else:
+ _type = cls._gen_type(param)
+ if 'description' in param:
+ descr = param['description']
+ else:
+ descr = "*No description available*"
+ res = {
+ 'name': param['name'],
+ 'in': location,
+ 'schema': {
+ 'type': _type
+ },
+ 'description': descr
+ }
+ if param['required']:
+ res['required'] = True
+ elif param['default'] is None:
+ res['allowEmptyValue'] = True
+ else:
+ res['default'] = param['default']
+ parameters.append(res)
+
+ return parameters
+
+ @classmethod
+    def _gen_paths(cls, all_endpoints, base_url):
+ METHOD_ORDER = ['get', 'post', 'put', 'delete']
+ paths = {}
+ for path, endpoints in sorted(list(ENDPOINT_MAP.items()),
+ key=lambda p: p[0]):
+ methods = {}
+ skip = False
+
+ endpoint_list = sorted(endpoints, key=lambda e:
+ METHOD_ORDER.index(e.method.lower()))
+ for endpoint in endpoint_list:
+ if not endpoint.is_api and not all_endpoints:
+ skip = True
+ break
+
+ method = endpoint.method
+ func = endpoint.func
+
+ summary = "No description available"
+ resp = {}
+ p_info = []
+ if hasattr(func, 'doc_info'):
+ if func.doc_info['summary']:
+ summary = func.doc_info['summary']
+ resp = func.doc_info['response']
+ p_info = func.doc_info['parameters']
+ params = []
+ if endpoint.path_params:
+ params.extend(
+ cls._gen_params(
+ cls._add_param_info(endpoint.path_params, p_info), 'path'))
+ if endpoint.query_params:
+ params.extend(
+ cls._gen_params(
+ cls._add_param_info(endpoint.query_params, p_info), 'query'))
+
+ methods[method.lower()] = {
+ 'tags': [cls._get_tag(endpoint)],
+ 'summary': summary,
+ 'description': func.__doc__,
+ 'parameters': params,
+ 'responses': cls._gen_responses(method, resp)
+ }
+
+ if method.lower() in ['post', 'put']:
+ if endpoint.body_params:
+ body_params = cls._add_param_info(endpoint.body_params, p_info)
+ methods[method.lower()]['requestBody'] = {
+ 'content': {
+ 'application/json': {
+ 'schema': cls._gen_schema_for_content(body_params)}}}
+
+ if endpoint.is_secure:
+ methods[method.lower()]['security'] = [{'jwt': []}]
+
+ if not skip:
+                paths[path[len(base_url):]] = methods
+
+ return paths
+
+ def _gen_spec(self, all_endpoints=False, base_url=""):
+ if all_endpoints:
+ base_url = ""
+
+ host = cherrypy.request.base
+ host = host[host.index(':')+3:]
+ logger.debug("DOCS: Host: %s", host)
+
+ paths = self._gen_paths(all_endpoints, base_url)
+
+ if not base_url:
+ base_url = "/"
+
+ scheme = 'https'
+ ssl = str_to_bool(mgr.get_localized_module_option('ssl', True))
+ if not ssl:
+ scheme = 'http'
+
+ spec = {
+ 'openapi': "3.0.0",
+ 'info': {
+ 'description': "Please note that this API is not an official "
+ "Ceph REST API to be used by third-party "
+ "applications. It's primary purpose is to serve"
+ " the requirements of the Ceph Dashboard and is"
+ " subject to change at any time. Use at your "
+ "own risk.",
+ 'version': "v1",
+ 'title': "Ceph-Dashboard REST API"
+ },
+ 'host': host,
+ 'basePath': base_url,
+ 'servers': [{'url': "{}{}".format(cherrypy.request.base, base_url)}],
+ 'tags': self._gen_tags(all_endpoints),
+ 'schemes': [scheme],
+ 'paths': paths,
+ 'components': {
+ 'securitySchemes': {
+ 'jwt': {
+ 'type': 'http',
+ 'scheme': 'bearer',
+ 'bearerFormat': 'JWT'
+ }
+ }
+ }
+ }
+
+ return spec
+
+ @Endpoint(path="api.json")
+ def api_json(self):
+ return self._gen_spec(False, "/api")
+
+ @Endpoint(path="api-all.json")
+ def api_all_json(self):
+ return self._gen_spec(True, "/api")
+
+ def _swagger_ui_page(self, all_endpoints=False):
+ base = cherrypy.request.base
+ if all_endpoints:
+ spec_url = "{}/docs/api-all.json".format(base)
+ else:
+ spec_url = "{}/docs/api.json".format(base)
+
+ page = """
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <meta charset="UTF-8">
+ <meta name="referrer" content="no-referrer" />
+ <link rel="stylesheet" type="text/css"
+ href="/swagger-ui.css" >
+ <style>
+ html
+ {{
+ box-sizing: border-box;
+ overflow: -moz-scrollbars-vertical;
+ overflow-y: scroll;
+ }}
+ *,
+ *:before,
+ *:after
+ {{
+ box-sizing: inherit;
+ }}
+ body {{
+ margin:0;
+ background: #fafafa;
+ }}
+ </style>
+ </head>
+ <body>
+ <div id="swagger-ui"></div>
+ <script src="/swagger-ui-bundle.js">
+ </script>
+ <script>
+ window.onload = function() {{
+ const ui = SwaggerUIBundle({{
+ url: '{}',
+ dom_id: '#swagger-ui',
+ presets: [
+ SwaggerUIBundle.presets.apis
+ ],
+ layout: "BaseLayout"
+ }})
+ window.ui = ui
+ }}
+ </script>
+ </body>
+ </html>
+ """.format(spec_url)
+
+ return page
+
+ @Endpoint(json_response=False)
+ def __call__(self, all_endpoints=False):
+ return self._swagger_ui_page(all_endpoints)
diff --git a/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py b/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
new file mode 100644
index 00000000..34c9f651
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from cherrypy import NotFound
+
+from . import ApiController, RESTController, Endpoint, ReadPermission
+from ..security import Scope
+from ..services.ceph_service import CephService
+from .. import mgr
+
+
+def _serialize_ecp(name, ecp):
+ def serialize_numbers(key):
+ value = ecp.get(key)
+ if value is not None:
+ ecp[key] = int(value)
+
+ ecp['name'] = name
+ serialize_numbers('k')
+ serialize_numbers('m')
+ return ecp
+
+
+@ApiController('/erasure_code_profile', Scope.POOL)
+class ErasureCodeProfile(RESTController):
+ '''
+ create() supports additional key-value arguments that are passed to the
+ ECP plugin.
+ '''
+
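+    # Illustrative request against create() (values are made up):
+    #   curl -H "Content-Type: application/json" -X POST \
+    #        -d '{"name": "ecp42", "k": "4", "m": "2"}' \
+    #        https://127.0.0.1:8443/api/erasure_code_profile
+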
+ def list(self):
+ ret = []
+ for name, ecp in mgr.get('osd_map').get('erasure_code_profiles', {}).items():
+ ret.append(_serialize_ecp(name, ecp))
+ return ret
+
+ def get(self, name):
+ try:
+ ecp = mgr.get('osd_map')['erasure_code_profiles'][name]
+ return _serialize_ecp(name, ecp)
+ except KeyError:
+ raise NotFound('No such erasure code profile')
+
+ def create(self, name, **kwargs):
+ profile = ['{}={}'.format(key, value) for key, value in kwargs.items()]
+ CephService.send_command('mon', 'osd erasure-code-profile set', name=name,
+ profile=profile)
+
+ def delete(self, name):
+ CephService.send_command('mon', 'osd erasure-code-profile rm', name=name)
+
+ @Endpoint()
+ @ReadPermission
+ def _info(self):
+ '''Used for profile creation and editing'''
+ config = mgr.get('config')
+ osd_map_crush = mgr.get('osd_map_crush')
+ return {
+            # Because 'shec' is experimental it's not listed in
+            # 'osd_erasure_code_plugins', so it is appended here explicitly.
+            'plugins': config['osd_erasure_code_plugins'].split() + ['shec'],
+ 'directory': config['erasure_code_dir'],
+ 'devices': list({device['class'] for device in osd_map_crush['devices']}),
+ 'failure_domains': [domain['name'] for domain in osd_map_crush['types']],
+ 'names': [name for name, _ in
+ mgr.get('osd_map').get('erasure_code_profiles', {}).items()]
+ }
diff --git a/src/pybind/mgr/dashboard/controllers/grafana.py b/src/pybind/mgr/dashboard/controllers/grafana.py
new file mode 100644
index 00000000..001367c7
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/grafana.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import (ApiController, BaseController, Endpoint, ReadPermission,
+ UpdatePermission)
+from .. import mgr
+from ..exceptions import DashboardException
+from ..grafana import GrafanaRestClient, push_local_dashboards
+from ..security import Scope
+from ..settings import Settings
+
+
+@ApiController('/grafana', Scope.GRAFANA)
+class Grafana(BaseController):
+ @Endpoint()
+ @ReadPermission
+ def url(self):
+ grafana_url = mgr.get_module_option('GRAFANA_API_URL')
+ grafana_frontend_url = mgr.get_module_option('GRAFANA_FRONTEND_API_URL')
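+        # If only the frontend URL is configured (no API URL), return an
+        # empty instance URL; otherwise the frontend URL takes precedence.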
+        if grafana_frontend_url != '' and grafana_url == '':
+            url = ''
+        else:
+            url = (grafana_frontend_url or grafana_url).rstrip('/')
+ response = {'instance': url}
+ return response
+
+ @Endpoint()
+ @ReadPermission
+ def validation(self, params):
+ grafana = GrafanaRestClient()
+ method = 'GET'
+ url = str(Settings.GRAFANA_API_URL).rstrip('/') + \
+ '/api/dashboards/uid/' + params
+ response = grafana.url_validation(method, url)
+ return response
+
+ @Endpoint(method='POST')
+ @UpdatePermission
+ def dashboards(self):
+ response = dict()
+ try:
+ response['success'] = push_local_dashboards()
+ except Exception as e: # pylint: disable=broad-except
+ raise DashboardException(
+ msg=str(e),
+ component='grafana',
+ http_status_code=500,
+ )
+ return response
diff --git a/src/pybind/mgr/dashboard/controllers/health.py b/src/pybind/mgr/dashboard/controllers/health.py
new file mode 100644
index 00000000..fab19bcb
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/health.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+
+from . import ApiController, Endpoint, BaseController
+
+from .. import mgr
+from ..security import Permission, Scope
+from ..services.ceph_service import CephService
+from ..services.iscsi_cli import IscsiGatewaysConfig
+
+
+class HealthData(object):
+ """
+ A class to be used in combination with BaseController to allow either
+ "full" or "minimal" sets of health data to be collected.
+
+    To function properly, it needs BaseController._has_permissions to be
+    passed in as ``auth_callback``.
+ """
+
+ def __init__(self, auth_callback, minimal=True):
+ self._has_permissions = auth_callback
+ self._minimal = minimal
+
+ @staticmethod
+ def _partial_dict(orig, keys):
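+        """Return a dict containing only ``keys`` taken from ``orig``."""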
+ return {k: orig[k] for k in keys}
+
+ def all_health(self):
+ result = {
+ "health": self.basic_health(),
+ }
+
+ if self._has_permissions(Permission.READ, Scope.MONITOR):
+ result['mon_status'] = self.mon_status()
+
+ if self._has_permissions(Permission.READ, Scope.CEPHFS):
+ result['fs_map'] = self.fs_map()
+
+ if self._has_permissions(Permission.READ, Scope.OSD):
+ result['osd_map'] = self.osd_map()
+ result['scrub_status'] = self.scrub_status()
+ result['pg_info'] = self.pg_info()
+
+ if self._has_permissions(Permission.READ, Scope.MANAGER):
+ result['mgr_map'] = self.mgr_map()
+
+ if self._has_permissions(Permission.READ, Scope.POOL):
+ result['pools'] = self.pools()
+ result['df'] = self.df()
+ result['client_perf'] = self.client_perf()
+
+ if self._has_permissions(Permission.READ, Scope.HOSTS):
+ result['hosts'] = self.host_count()
+
+ if self._has_permissions(Permission.READ, Scope.RGW):
+ result['rgw'] = self.rgw_count()
+
+ if self._has_permissions(Permission.READ, Scope.ISCSI):
+ result['iscsi_daemons'] = self.iscsi_daemons()
+
+ return result
+
+ def basic_health(self):
+ health_data = mgr.get("health")
+ health = json.loads(health_data['json'])
+
+ # Transform the `checks` dict into a list for the convenience
+ # of rendering from javascript.
+ checks = []
+ for k, v in health['checks'].items():
+ v['type'] = k
+ checks.append(v)
+
+ checks = sorted(checks, key=lambda c: c['severity'])
+ health['checks'] = checks
+ return health
+
+ def client_perf(self):
+ result = CephService.get_client_perf()
+ if self._minimal:
+ result = self._partial_dict(
+ result,
+ ['read_bytes_sec', 'read_op_per_sec',
+ 'recovering_bytes_per_sec', 'write_bytes_sec',
+ 'write_op_per_sec']
+ )
+ return result
+
+ def df(self):
+ df = mgr.get('df')
+
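+        # The per-device-class breakdown is not used by the dashboard, so
+        # drop it to reduce the payload.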
+ del df['stats_by_class']
+
+ if self._minimal:
+ df = dict(stats=self._partial_dict(
+ df['stats'],
+ ['total_avail_bytes', 'total_bytes',
+ 'total_used_raw_bytes']
+ ))
+ return df
+
+ def fs_map(self):
+ fs_map = mgr.get('fs_map')
+ if self._minimal:
+ fs_map = self._partial_dict(fs_map, ['filesystems', 'standbys'])
+ fs_map['filesystems'] = [self._partial_dict(item, ['mdsmap']) for
+ item in fs_map['filesystems']]
+            for fs in fs_map['filesystems']:
+                mdsmap_info = fs['mdsmap']['info']
+                min_mdsmap_info = dict()
+                for k, v in mdsmap_info.items():
+                    min_mdsmap_info[k] = self._partial_dict(v, ['state'])
+                fs['mdsmap'] = dict(info=min_mdsmap_info)
+        return fs_map
+
+ def host_count(self):
+ return len(mgr.list_servers())
+
+ def iscsi_daemons(self):
+ gateways = IscsiGatewaysConfig.get_gateways_config()['gateways']
+ return len(gateways) if gateways else 0
+
+ def mgr_map(self):
+ mgr_map = mgr.get('mgr_map')
+ if self._minimal:
+ mgr_map = self._partial_dict(mgr_map, ['active_name', 'standbys'])
+ return mgr_map
+
+ def mon_status(self):
+ mon_status = json.loads(mgr.get('mon_status')['json'])
+ if self._minimal:
+ mon_status = self._partial_dict(mon_status, ['monmap', 'quorum'])
+ mon_status['monmap'] = self._partial_dict(
+ mon_status['monmap'], ['mons']
+ )
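+            # Only the monitor count is of interest in minimal mode, so the
+            # individual entries are blanked out.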
+ mon_status['monmap']['mons'] = [{}] * \
+ len(mon_status['monmap']['mons'])
+ return mon_status
+
+ def osd_map(self):
+ osd_map = mgr.get('osd_map')
+ assert osd_map is not None
+ # Not needed, skip the effort of transmitting this to UI
+ del osd_map['pg_temp']
+ if self._minimal:
+ osd_map = self._partial_dict(osd_map, ['osds'])
+ osd_map['osds'] = [
+ self._partial_dict(item, ['in', 'up'])
+ for item in osd_map['osds']
+ ]
+ else:
+ osd_map['tree'] = mgr.get('osd_map_tree')
+ osd_map['crush'] = mgr.get('osd_map_crush')
+ osd_map['crush_map_text'] = mgr.get('osd_map_crush_map_text')
+ osd_map['osd_metadata'] = mgr.get('osd_metadata')
+ return osd_map
+
+ def pg_info(self):
+ return CephService.get_pg_info()
+
+ def pools(self):
+ pools = CephService.get_pool_list_with_stats()
+ if self._minimal:
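+            # As with the monmap, minimal consumers only need the count.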
+ pools = [{}] * len(pools)
+ return pools
+
+ def rgw_count(self):
+ return len(CephService.get_service_list('rgw'))
+
+ def scrub_status(self):
+ return CephService.get_scrub_status()
+
+
+@ApiController('/health')
+class Health(BaseController):
+ def __init__(self):
+ super(Health, self).__init__()
+ self.health_full = HealthData(self._has_permissions, minimal=False)
+ self.health_minimal = HealthData(self._has_permissions, minimal=True)
+
+ @Endpoint()
+ def full(self):
+ return self.health_full.all_health()
+
+ @Endpoint()
+ def minimal(self):
+ return self.health_minimal.all_health()
diff --git a/src/pybind/mgr/dashboard/controllers/home.py b/src/pybind/mgr/dashboard/controllers/home.py
new file mode 100644
index 00000000..0fbe79f7
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/home.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import os
+import re
+import json
+try:
+ from functools import lru_cache
+except ImportError:
+ from ..plugins.lru_cache import lru_cache
+
+import cherrypy
+from cherrypy.lib.static import serve_file
+
+from . import Controller, UiApiController, BaseController, Proxy, Endpoint
+from .. import mgr, logger
+
+
+class LanguageMixin(object):
+ def __init__(self):
+ self.LANGUAGES = {
+ f
+ for f in os.listdir(mgr.get_frontend_path())
+ if os.path.isdir(os.path.join(mgr.get_frontend_path(), f))
+ }
+ self.LANGUAGES_PATH_MAP = {
+ f.lower(): {
+ 'lang': f,
+ 'path': os.path.join(mgr.get_frontend_path(), f)
+ }
+ for f in self.LANGUAGES
+ }
+ # pre-populating with the primary language subtag.
+ for lang in list(self.LANGUAGES_PATH_MAP.keys()):
+ if '-' in lang:
+ self.LANGUAGES_PATH_MAP[lang.split('-')[0]] = {
+ 'lang': self.LANGUAGES_PATH_MAP[lang]['lang'],
+ 'path': self.LANGUAGES_PATH_MAP[lang]['path']
+ }
+ with open("{}/../package.json".format(mgr.get_frontend_path()),
+ "r") as f:
+ config = json.load(f)
+ self.DEFAULT_LANGUAGE = config['config']['locale']
+ self.DEFAULT_LANGUAGE_PATH = os.path.join(mgr.get_frontend_path(),
+ self.DEFAULT_LANGUAGE)
+ super(LanguageMixin, self).__init__()
+
+
+@Controller("/", secure=False)
+class HomeController(BaseController, LanguageMixin):
+ LANG_TAG_SEQ_RE = re.compile(r'\s*([^,]+)\s*,?\s*')
+ LANG_TAG_RE = re.compile(
+ r'^(?P<locale>[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})?)(;q=(?P<weight>[01]\.\d{0,3}))?$')
+ MAX_ACCEPTED_LANGS = 10
+
+ @lru_cache()
+ def _parse_accept_language(self, accept_lang_header):
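+        # e.g. 'da, en-gb;q=0.8, en;q=0.7' -> ['da', 'en-gb', 'en']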
+ result = []
+ for i, m in enumerate(self.LANG_TAG_SEQ_RE.finditer(accept_lang_header)):
+ if i >= self.MAX_ACCEPTED_LANGS:
+ logger.debug("reached max accepted languages, skipping remaining")
+ break
+
+ tag_match = self.LANG_TAG_RE.match(m.group(1))
+ if tag_match is None:
+ raise cherrypy.HTTPError(400, "Malformed 'Accept-Language' header")
+ locale = tag_match.group('locale').lower()
+ weight = tag_match.group('weight')
+ if weight:
+ try:
+ ratio = float(weight)
+ except ValueError:
+ raise cherrypy.HTTPError(400, "Malformed 'Accept-Language' header")
+ else:
+ ratio = 1.0
+ result.append((locale, ratio))
+
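+        # Two stable sorts: by locale (ascending) first, then by weight
+        # (descending), so equally weighted tags stay in alphabetical order.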
+ result.sort(key=lambda l: l[0])
+ result.sort(key=lambda l: l[1], reverse=True)
+ logger.debug("language preference: %s", result)
+ return [l[0] for l in result]
+
+ def _language_dir(self, langs):
+ for lang in langs:
+ if lang in self.LANGUAGES_PATH_MAP:
+ logger.debug("found directory for language '%s'", lang)
+ cherrypy.response.headers[
+ 'Content-Language'] = self.LANGUAGES_PATH_MAP[lang]['lang']
+ return self.LANGUAGES_PATH_MAP[lang]['path']
+
+ logger.debug("Languages '%s' not available, falling back to %s",
+ langs, self.DEFAULT_LANGUAGE)
+ cherrypy.response.headers['Content-Language'] = self.DEFAULT_LANGUAGE
+ return self.DEFAULT_LANGUAGE_PATH
+
+ @Proxy()
+ def __call__(self, path, **params):
+ if not path:
+ path = "index.html"
+
+ if 'cd-lang' in cherrypy.request.cookie:
+ langs = [cherrypy.request.cookie['cd-lang'].value.lower()]
+ logger.debug("frontend language from cookie: %s", langs)
+ else:
+ if 'Accept-Language' in cherrypy.request.headers:
+ accept_lang_header = cherrypy.request.headers['Accept-Language']
+ langs = self._parse_accept_language(accept_lang_header)
+ else:
+ langs = [self.DEFAULT_LANGUAGE.lower()]
+ logger.debug("frontend language from headers: %s", langs)
+
+ base_dir = self._language_dir(langs)
+ full_path = os.path.join(base_dir, path)
+
+ # Block uplevel attacks
+ if not os.path.normpath(full_path).startswith(os.path.normpath(base_dir)):
+ raise cherrypy.HTTPError(403) # Forbidden
+
+ logger.debug("serving static content: %s", full_path)
+        if 'Vary' in cherrypy.response.headers:
+            cherrypy.response.headers['Vary'] = "{}, Accept-Language".format(
+                cherrypy.response.headers['Vary'])
+        else:
+            cherrypy.response.headers['Vary'] = "Accept-Language"
+
+ cherrypy.response.headers['Cache-control'] = "no-cache"
+ return serve_file(full_path)
+
+
+@UiApiController("/langs", secure=False)
+class LangsController(BaseController, LanguageMixin):
+ @Endpoint('GET')
+ def __call__(self):
+ return list(self.LANGUAGES)
diff --git a/src/pybind/mgr/dashboard/controllers/host.py b/src/pybind/mgr/dashboard/controllers/host.py
new file mode 100644
index 00000000..e8518a14
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/host.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import ApiController, RESTController
+from .. import mgr
+from ..security import Scope
+
+
+@ApiController('/host', Scope.HOSTS)
+class Host(RESTController):
+ def list(self):
+ return mgr.list_servers()
diff --git a/src/pybind/mgr/dashboard/controllers/iscsi.py b/src/pybind/mgr/dashboard/controllers/iscsi.py
new file mode 100644
index 00000000..af753205
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/iscsi.py
@@ -0,0 +1,1049 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-branches
+# pylint: disable=too-many-lines
+from __future__ import absolute_import
+
+from copy import deepcopy
+import re
+import json
+import cherrypy
+
+import rados
+import rbd
+
+from . import ApiController, UiApiController, RESTController, BaseController, Endpoint,\
+ ReadPermission, UpdatePermission, Task
+from .. import mgr
+from ..rest_client import RequestException
+from ..security import Scope
+from ..services.iscsi_client import IscsiClient
+from ..services.iscsi_cli import IscsiGatewaysConfig
+from ..services.iscsi_config import IscsiGatewayDoesNotExist
+from ..services.rbd import format_bitmask
+from ..services.tcmu_service import TcmuService
+from ..exceptions import DashboardException
+from ..tools import str_to_bool, TaskManager
+
+
+@UiApiController('/iscsi', Scope.ISCSI)
+class IscsiUi(BaseController):
+
+ REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION = 10
+ REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION = 11
+
+ @Endpoint()
+ @ReadPermission
+ def status(self):
+ status = {'available': False}
+ try:
+ gateway = get_available_gateway()
+ except DashboardException as e:
+ status['message'] = str(e)
+ return status
+ try:
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ if config['version'] < IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION or \
+ config['version'] > IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION:
+ status['message'] = 'Unsupported `ceph-iscsi` config version. ' \
+ 'Expected >= {} and <= {} but found' \
+ ' {}.'.format(IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION,
+ IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION,
+ config['version'])
+ return status
+ status['available'] = True
+ except RequestException as e:
+ if e.content:
+ try:
+ content = json.loads(e.content)
+ content_message = content.get('message')
+ except ValueError:
+ content_message = e.content
+ if content_message:
+ status['message'] = content_message
+
+ return status
+
+ @Endpoint()
+ @ReadPermission
+ def version(self):
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ return {
+ 'ceph_iscsi_config_version': config['version']
+ }
+
+ @Endpoint()
+ @ReadPermission
+ def settings(self):
+ gateway = get_available_gateway()
+ settings = IscsiClient.instance(gateway_name=gateway).get_settings()
+ if 'target_controls_limits' in settings:
+ target_default_controls = settings['target_default_controls']
+ for ctrl_k, ctrl_v in target_default_controls.items():
+ limits = settings['target_controls_limits'].get(ctrl_k, {})
+ if 'type' not in limits:
+ # default
+ limits['type'] = 'int'
+ # backward compatibility
+ if target_default_controls[ctrl_k] in ['Yes', 'No']:
+ limits['type'] = 'bool'
+ target_default_controls[ctrl_k] = str_to_bool(ctrl_v)
+ settings['target_controls_limits'][ctrl_k] = limits
+ if 'disk_controls_limits' in settings:
+ for backstore, disk_controls_limits in settings['disk_controls_limits'].items():
+ disk_default_controls = settings['disk_default_controls'][backstore]
+ for ctrl_k, ctrl_v in disk_default_controls.items():
+ limits = disk_controls_limits.get(ctrl_k, {})
+ if 'type' not in limits:
+ # default
+ limits['type'] = 'int'
+ settings['disk_controls_limits'][backstore][ctrl_k] = limits
+ return settings
+
+ @Endpoint()
+ @ReadPermission
+ def portals(self):
+ portals = []
+ gateways_config = IscsiGatewaysConfig.get_gateways_config()
+ for name in gateways_config['gateways']:
+ try:
+ ip_addresses = IscsiClient.instance(gateway_name=name).get_ip_addresses()
+ portals.append({'name': name, 'ip_addresses': ip_addresses['data']})
+ except RequestException:
+ pass
+ return sorted(portals, key=lambda p: '{}.{}'.format(p['name'], p['ip_addresses']))
+
+ @Endpoint()
+ @ReadPermission
+ def overview(self):
+ result_gateways = []
+ result_images = []
+ gateways_names = IscsiGatewaysConfig.get_gateways_config()['gateways'].keys()
+ config = None
+ for gateway_name in gateways_names:
+ try:
+ config = IscsiClient.instance(gateway_name=gateway_name).get_config()
+ break
+ except RequestException:
+ pass
+
+ # Gateways info
+ for gateway_name in gateways_names:
+ gateway = {
+ 'name': gateway_name,
+ 'state': '',
+ 'num_targets': 'n/a',
+ 'num_sessions': 'n/a'
+ }
+ try:
+ IscsiClient.instance(gateway_name=gateway_name).ping()
+ gateway['state'] = 'up'
+ if config:
+ gateway['num_sessions'] = 0
+ if gateway_name in config['gateways']:
+ gatewayinfo = IscsiClient.instance(
+ gateway_name=gateway_name).get_gatewayinfo()
+ gateway['num_sessions'] = gatewayinfo['num_sessions']
+ except RequestException:
+ gateway['state'] = 'down'
+ if config:
+ gateway['num_targets'] = len([target for _, target in config['targets'].items()
+ if gateway_name in target['portals']])
+ result_gateways.append(gateway)
+
+ # Images info
+ if config:
+ tcmu_info = TcmuService.get_iscsi_info()
+ for _, disk_config in config['disks'].items():
+ image = {
+ 'pool': disk_config['pool'],
+ 'image': disk_config['image'],
+ 'backstore': disk_config['backstore'],
+ 'optimized_since': None,
+ 'stats': None,
+ 'stats_history': None
+ }
+ tcmu_image_info = TcmuService.get_image_info(image['pool'],
+ image['image'],
+ tcmu_info)
+ if tcmu_image_info:
+ if 'optimized_since' in tcmu_image_info:
+ image['optimized_since'] = tcmu_image_info['optimized_since']
+ if 'stats' in tcmu_image_info:
+ image['stats'] = tcmu_image_info['stats']
+ if 'stats_history' in tcmu_image_info:
+ image['stats_history'] = tcmu_image_info['stats_history']
+ result_images.append(image)
+
+ return {
+ 'gateways': sorted(result_gateways, key=lambda g: g['name']),
+ 'images': sorted(result_images, key=lambda i: '{}/{}'.format(i['pool'], i['image']))
+ }
+
+
+@ApiController('/iscsi', Scope.ISCSI)
+class Iscsi(BaseController):
+
+ @Endpoint('GET', 'discoveryauth')
+ @ReadPermission
+ def get_discoveryauth(self):
+ gateway = get_available_gateway()
+ return self._get_discoveryauth(gateway)
+
+ @Endpoint('PUT', 'discoveryauth')
+ @UpdatePermission
+ def set_discoveryauth(self, user, password, mutual_user, mutual_password):
+ validate_auth({
+ 'user': user,
+ 'password': password,
+ 'mutual_user': mutual_user,
+ 'mutual_password': mutual_password
+ })
+
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ gateway_names = list(config['gateways'].keys())
+ validate_rest_api(gateway_names)
+ IscsiClient.instance(gateway_name=gateway).update_discoveryauth(user,
+ password,
+ mutual_user,
+ mutual_password)
+ return self._get_discoveryauth(gateway)
+
+ def _get_discoveryauth(self, gateway):
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ user = config['discovery_auth']['username']
+ password = config['discovery_auth']['password']
+ mutual_user = config['discovery_auth']['mutual_username']
+ mutual_password = config['discovery_auth']['mutual_password']
+ return {
+ 'user': user,
+ 'password': password,
+ 'mutual_user': mutual_user,
+ 'mutual_password': mutual_password
+ }
+
+
+def iscsi_target_task(name, metadata, wait_for=2.0):
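+    # Decorator factory: wraps an endpoint in a Task named
+    # 'iscsi/target/<name>' so its progress can be tracked.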
+ return Task("iscsi/target/{}".format(name), metadata, wait_for)
+
+
+@ApiController('/iscsi/target', Scope.ISCSI)
+class IscsiTarget(RESTController):
+
+ def list(self):
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ targets = []
+ for target_iqn in config['targets'].keys():
+ target = IscsiTarget._config_to_target(target_iqn, config)
+ IscsiTarget._set_info(target)
+ targets.append(target)
+ return targets
+
+ def get(self, target_iqn):
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ if target_iqn not in config['targets']:
+ raise cherrypy.HTTPError(404)
+ target = IscsiTarget._config_to_target(target_iqn, config)
+ IscsiTarget._set_info(target)
+ return target
+
+ @iscsi_target_task('delete', {'target_iqn': '{target_iqn}'})
+ def delete(self, target_iqn):
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ if target_iqn not in config['targets']:
+ raise DashboardException(msg='Target does not exist',
+ code='target_does_not_exist',
+ component='iscsi')
+ portal_names = list(config['targets'][target_iqn]['portals'].keys())
+ validate_rest_api(portal_names)
+ if portal_names:
+ portal_name = portal_names[0]
+ target_info = IscsiClient.instance(gateway_name=portal_name).get_targetinfo(target_iqn)
+ if target_info['num_sessions'] > 0:
+ raise DashboardException(msg='Target has active sessions',
+ code='target_has_active_sessions',
+ component='iscsi')
+ IscsiTarget._delete(target_iqn, config, 0, 100)
+
+ @iscsi_target_task('create', {'target_iqn': '{target_iqn}'})
+ def create(self, target_iqn=None, target_controls=None, acl_enabled=None,
+ auth=None, portals=None, disks=None, clients=None, groups=None):
+ target_controls = target_controls or {}
+ portals = portals or []
+ disks = disks or []
+ clients = clients or []
+ groups = groups or []
+
+ validate_auth(auth)
+ for client in clients:
+ validate_auth(client['auth'])
+
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ if target_iqn in config['targets']:
+ raise DashboardException(msg='Target already exists',
+ code='target_already_exists',
+ component='iscsi')
+ settings = IscsiClient.instance(gateway_name=gateway).get_settings()
+ IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
+
+ IscsiTarget._create(target_iqn, target_controls, acl_enabled, auth, portals, disks,
+ clients, groups, 0, 100, config, settings)
+
+ @iscsi_target_task('edit', {'target_iqn': '{target_iqn}'})
+ def set(self, target_iqn, new_target_iqn=None, target_controls=None, acl_enabled=None,
+ auth=None, portals=None, disks=None, clients=None, groups=None):
+ target_controls = target_controls or {}
+ portals = IscsiTarget._sorted_portals(portals)
+ disks = IscsiTarget._sorted_disks(disks)
+ clients = IscsiTarget._sorted_clients(clients)
+ groups = IscsiTarget._sorted_groups(groups)
+
+ validate_auth(auth)
+ for client in clients:
+ validate_auth(client['auth'])
+
+ gateway = get_available_gateway()
+ config = IscsiClient.instance(gateway_name=gateway).get_config()
+ if target_iqn not in config['targets']:
+ raise DashboardException(msg='Target does not exist',
+ code='target_does_not_exist',
+ component='iscsi')
+ if target_iqn != new_target_iqn and new_target_iqn in config['targets']:
+ raise DashboardException(msg='Target IQN already in use',
+ code='target_iqn_already_in_use',
+ component='iscsi')
+
+ settings = IscsiClient.instance(gateway_name=gateway).get_settings()
+ new_portal_names = {p['host'] for p in portals}
+ old_portal_names = set(config['targets'][target_iqn]['portals'].keys())
+ deleted_portal_names = list(old_portal_names - new_portal_names)
+ validate_rest_api(deleted_portal_names)
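+        # Editing is implemented as delete-then-recreate: _delete covers
+        # task progress 0-50, _create the remaining 50-100.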
+ IscsiTarget._validate(new_target_iqn, target_controls, portals, disks, groups, settings)
+ IscsiTarget._validate_delete(gateway, target_iqn, config, new_target_iqn, target_controls,
+ disks, clients, groups)
+ config = IscsiTarget._delete(target_iqn, config, 0, 50, new_target_iqn, target_controls,
+ portals, disks, clients, groups)
+ IscsiTarget._create(new_target_iqn, target_controls, acl_enabled, auth, portals, disks,
+ clients, groups, 50, 100, config, settings)
+
+ @staticmethod
+ def _delete(target_iqn, config, task_progress_begin, task_progress_end, new_target_iqn=None,
+ new_target_controls=None, new_portals=None, new_disks=None, new_clients=None,
+ new_groups=None):
+ new_target_controls = new_target_controls or {}
+ new_portals = new_portals or []
+ new_disks = new_disks or []
+ new_clients = new_clients or []
+ new_groups = new_groups or []
+
+ TaskManager.current_task().set_progress(task_progress_begin)
+ target_config = config['targets'][target_iqn]
+ if not target_config['portals'].keys():
+ raise DashboardException(msg="Cannot delete a target that doesn't contain any portal",
+ code='cannot_delete_target_without_portals',
+ component='iscsi')
+ target = IscsiTarget._config_to_target(target_iqn, config)
+ n_groups = len(target_config['groups'])
+ n_clients = len(target_config['clients'])
+ n_target_disks = len(target_config['disks'])
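+        # Spread the progress range evenly over all groups, clients and
+        # disks that have to be processed.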
+ task_progress_steps = n_groups + n_clients + n_target_disks
+ task_progress_inc = 0
+ if task_progress_steps != 0:
+ task_progress_inc = int((task_progress_end - task_progress_begin) / task_progress_steps)
+ gateway_name = list(target_config['portals'].keys())[0]
+ deleted_groups = []
+ for group_id in list(target_config['groups'].keys()):
+ if IscsiTarget._group_deletion_required(target, new_target_iqn, new_target_controls,
+ new_groups, group_id):
+ deleted_groups.append(group_id)
+ IscsiClient.instance(gateway_name=gateway_name).delete_group(target_iqn,
+ group_id)
+ else:
+ group = IscsiTarget._get_group(new_groups, group_id)
+
+ old_group_disks = set(target_config['groups'][group_id]['disks'].keys())
+ new_group_disks = {'{}/{}'.format(x['pool'], x['image']) for x in group['disks']}
+ local_deleted_disks = list(old_group_disks - new_group_disks)
+
+ old_group_members = set(target_config['groups'][group_id]['members'])
+ new_group_members = set(group['members'])
+ local_deleted_members = list(old_group_members - new_group_members)
+
+ if local_deleted_disks or local_deleted_members:
+ IscsiClient.instance(gateway_name=gateway_name).update_group(
+ target_iqn, group_id, local_deleted_members, local_deleted_disks)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ deleted_clients = []
+ deleted_client_luns = []
+ for client_iqn, client_config in target_config['clients'].items():
+ if IscsiTarget._client_deletion_required(target, new_target_iqn, new_target_controls,
+ new_clients, client_iqn):
+ deleted_clients.append(client_iqn)
+ IscsiClient.instance(gateway_name=gateway_name).delete_client(target_iqn,
+ client_iqn)
+ else:
+ for image_id in list(client_config.get('luns', {}).keys()):
+ if IscsiTarget._client_lun_deletion_required(target, client_iqn, image_id,
+ new_clients, new_groups):
+ deleted_client_luns.append((client_iqn, image_id))
+ IscsiClient.instance(gateway_name=gateway_name).delete_client_lun(
+ target_iqn, client_iqn, image_id)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ for image_id in target_config['disks']:
+ if IscsiTarget._target_lun_deletion_required(target, new_target_iqn,
+ new_target_controls, new_disks, image_id):
+ all_clients = target_config['clients'].keys()
+ not_deleted_clients = [c for c in all_clients if c not in deleted_clients
+ and not IscsiTarget._client_in_group(target['groups'], c)
+ and not IscsiTarget._client_in_group(new_groups, c)]
+ for client_iqn in not_deleted_clients:
+ client_image_ids = target_config['clients'][client_iqn]['luns'].keys()
+ for client_image_id in client_image_ids:
+ if image_id == client_image_id and \
+ (client_iqn, client_image_id) not in deleted_client_luns:
+ IscsiClient.instance(gateway_name=gateway_name).delete_client_lun(
+ target_iqn, client_iqn, client_image_id)
+ IscsiClient.instance(gateway_name=gateway_name).delete_target_lun(target_iqn,
+ image_id)
+ pool, image = image_id.split('/', 1)
+ IscsiClient.instance(gateway_name=gateway_name).delete_disk(pool, image)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ old_portals_by_host = IscsiTarget._get_portals_by_host(target['portals'])
+ new_portals_by_host = IscsiTarget._get_portals_by_host(new_portals)
+ for old_portal_host, old_portal_ip_list in old_portals_by_host.items():
+ if IscsiTarget._target_portal_deletion_required(old_portal_host,
+ old_portal_ip_list,
+ new_portals_by_host):
+ IscsiClient.instance(gateway_name=gateway_name).delete_gateway(target_iqn,
+ old_portal_host)
+ if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
+ IscsiClient.instance(gateway_name=gateway_name).delete_target(target_iqn)
+ TaskManager.current_task().set_progress(task_progress_end)
+ return IscsiClient.instance(gateway_name=gateway_name).get_config()
+
+ @staticmethod
+ def _get_group(groups, group_id):
+ for group in groups:
+ if group['group_id'] == group_id:
+ return group
+ return None
+
+ @staticmethod
+ def _group_deletion_required(target, new_target_iqn, new_target_controls,
+ new_groups, group_id):
+ if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
+ return True
+ new_group = IscsiTarget._get_group(new_groups, group_id)
+ if not new_group:
+ return True
+ return False
+
+ @staticmethod
+ def _get_client(clients, client_iqn):
+ for client in clients:
+ if client['client_iqn'] == client_iqn:
+ return client
+ return None
+
+ @staticmethod
+ def _client_deletion_required(target, new_target_iqn, new_target_controls,
+ new_clients, client_iqn):
+ if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
+ return True
+ new_client = IscsiTarget._get_client(new_clients, client_iqn)
+ if not new_client:
+ return True
+ return False
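+        # The doubled braces below are literal '{'/'}' escaped for
+        # str.format(); only the single '{}' is substituted (the spec URL).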
+
+ @staticmethod
+ def _client_in_group(groups, client_iqn):
+ for group in groups:
+ if client_iqn in group['members']:
+ return True
+ return False
+
+ @staticmethod
+ def _client_lun_deletion_required(target, client_iqn, image_id, new_clients, new_groups):
+ new_client = IscsiTarget._get_client(new_clients, client_iqn)
+ if not new_client:
+ return True
+
+ # Disks inherited from groups must be considered
+ was_in_group = IscsiTarget._client_in_group(target['groups'], client_iqn)
+ is_in_group = IscsiTarget._client_in_group(new_groups, client_iqn)
+
+ if not was_in_group and is_in_group:
+ return True
+
+ if is_in_group:
+ return False
+
+ new_lun = IscsiTarget._get_disk(new_client.get('luns', []), image_id)
+ if not new_lun:
+ return True
+
+ old_client = IscsiTarget._get_client(target['clients'], client_iqn)
+ if not old_client:
+ return False
+
+ old_lun = IscsiTarget._get_disk(old_client.get('luns', []), image_id)
+ return new_lun != old_lun
+
+ @staticmethod
+ def _get_disk(disks, image_id):
+ for disk in disks:
+ if '{}/{}'.format(disk['pool'], disk['image']) == image_id:
+ return disk
+ return None
+
+ @staticmethod
+ def _target_lun_deletion_required(target, new_target_iqn, new_target_controls,
+ new_disks, image_id):
+ if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
+ return True
+ new_disk = IscsiTarget._get_disk(new_disks, image_id)
+ if not new_disk:
+ return True
+ old_disk = IscsiTarget._get_disk(target['disks'], image_id)
+ new_disk_without_controls = deepcopy(new_disk)
+ new_disk_without_controls.pop('controls')
+ old_disk_without_controls = deepcopy(old_disk)
+ old_disk_without_controls.pop('controls')
+ if new_disk_without_controls != old_disk_without_controls:
+ return True
+ return False
+
+ @staticmethod
+ def _target_portal_deletion_required(old_portal_host, old_portal_ip_list, new_portals_by_host):
+ if old_portal_host not in new_portals_by_host:
+ return True
+ if sorted(old_portal_ip_list) != sorted(new_portals_by_host[old_portal_host]):
+ return True
+ return False
+
+ @staticmethod
+ def _target_deletion_required(target, new_target_iqn, new_target_controls):
+ gateway = get_available_gateway()
+ settings = IscsiClient.instance(gateway_name=gateway).get_settings()
+
+ if target['target_iqn'] != new_target_iqn:
+ return True
+ if settings['api_version'] < 2 and target['target_controls'] != new_target_controls:
+ return True
+ return False
+
+ @staticmethod
+ def _validate(target_iqn, target_controls, portals, disks, groups, settings):
+ if not target_iqn:
+ raise DashboardException(msg='Target IQN is required',
+ code='target_iqn_required',
+ component='iscsi')
+
+ minimum_gateways = max(1, settings['config']['minimum_gateways'])
+ portals_by_host = IscsiTarget._get_portals_by_host(portals)
+ if len(portals_by_host.keys()) < minimum_gateways:
+ if minimum_gateways == 1:
+ msg = 'At least one portal is required'
+ else:
+ msg = 'At least {} portals are required'.format(minimum_gateways)
+ raise DashboardException(msg=msg,
+ code='portals_required',
+ component='iscsi')
+
+ # 'target_controls_limits' was introduced in ceph-iscsi > 3.2
+ # When using an older `ceph-iscsi` version these validations will
+ # NOT be executed beforehand
+ if 'target_controls_limits' in settings:
+ for target_control_name, target_control_value in target_controls.items():
+ limits = settings['target_controls_limits'].get(target_control_name)
+ if limits is not None:
+ min_value = limits.get('min')
+ if min_value is not None and target_control_value < min_value:
+ raise DashboardException(msg='Target control {} must be >= '
+ '{}'.format(target_control_name, min_value),
+ code='target_control_invalid_min',
+ component='iscsi')
+ max_value = limits.get('max')
+ if max_value is not None and target_control_value > max_value:
+ raise DashboardException(msg='Target control {} must be <= '
+ '{}'.format(target_control_name, max_value),
+ code='target_control_invalid_max',
+ component='iscsi')
+
+ portal_names = [p['host'] for p in portals]
+ validate_rest_api(portal_names)
+
+ for disk in disks:
+ pool = disk['pool']
+ image = disk['image']
+ backstore = disk['backstore']
+ required_rbd_features = settings['required_rbd_features'][backstore]
+ unsupported_rbd_features = settings['unsupported_rbd_features'][backstore]
+ IscsiTarget._validate_image(pool, image, backstore, required_rbd_features,
+ unsupported_rbd_features)
+
+ # 'disk_controls_limits' was introduced in ceph-iscsi > 3.2
+ # When using an older `ceph-iscsi` version these validations will
+ # NOT be executed beforehand
+ if 'disk_controls_limits' in settings:
+ for disk_control_name, disk_control_value in disk['controls'].items():
+ limits = settings['disk_controls_limits'][backstore].get(disk_control_name)
+ if limits is not None:
+ min_value = limits.get('min')
+ if min_value is not None and disk_control_value < min_value:
+ raise DashboardException(msg='Disk control {} must be >= '
+ '{}'.format(disk_control_name, min_value),
+ code='disk_control_invalid_min',
+ component='iscsi')
+ max_value = limits.get('max')
+ if max_value is not None and disk_control_value > max_value:
+ raise DashboardException(msg='Disk control {} must be <= '
+ '{}'.format(disk_control_name, max_value),
+ code='disk_control_invalid_max',
+ component='iscsi')
+
+ initiators = []
+ for group in groups:
+ initiators = initiators + group['members']
+ if len(initiators) != len(set(initiators)):
+ raise DashboardException(msg='Each initiator can only be part of 1 group at a time',
+ code='initiator_in_multiple_groups',
+ component='iscsi')
+
+ @staticmethod
+ def _validate_image(pool, image, backstore, required_rbd_features, unsupported_rbd_features):
+ try:
+ ioctx = mgr.rados.open_ioctx(pool)
+ try:
+ with rbd.Image(ioctx, image) as img:
+ if img.features() & required_rbd_features != required_rbd_features:
+ raise DashboardException(msg='Image {} cannot be exported using {} '
+ 'backstore because required features are '
+ 'missing (required features are '
+ '{})'.format(image,
+ backstore,
+ format_bitmask(
+ required_rbd_features)),
+ code='image_missing_required_features',
+ component='iscsi')
+ if img.features() & unsupported_rbd_features != 0:
+ raise DashboardException(msg='Image {} cannot be exported using {} '
+ 'backstore because it contains unsupported '
+ 'features ('
+ '{})'.format(image,
+ backstore,
+ format_bitmask(
+ unsupported_rbd_features)),
+ code='image_contains_unsupported_features',
+ component='iscsi')
+
+ except rbd.ImageNotFound:
+ raise DashboardException(msg='Image {} does not exist'.format(image),
+ code='image_does_not_exist',
+ component='iscsi')
+ except rados.ObjectNotFound:
+ raise DashboardException(msg='Pool {} does not exist'.format(pool),
+ code='pool_does_not_exist',
+ component='iscsi')
+
+ @staticmethod
+ def _validate_delete(gateway, target_iqn, config, new_target_iqn=None, new_target_controls=None,
+ new_disks=None, new_clients=None, new_groups=None):
+ new_target_controls = new_target_controls or {}
+ new_disks = new_disks or []
+ new_clients = new_clients or []
+ new_groups = new_groups or []
+
+ target_config = config['targets'][target_iqn]
+ target = IscsiTarget._config_to_target(target_iqn, config)
+ for client_iqn in list(target_config['clients'].keys()):
+ if IscsiTarget._client_deletion_required(target, new_target_iqn, new_target_controls,
+ new_clients, client_iqn):
+ client_info = IscsiClient.instance(gateway_name=gateway).get_clientinfo(target_iqn,
+ client_iqn)
+ if client_info.get('state', {}).get('LOGGED_IN', []):
+ raise DashboardException(msg="Client '{}' cannot be deleted until it's logged "
+ "out".format(client_iqn),
+ code='client_logged_in',
+ component='iscsi')
+
+ @staticmethod
+ def _update_targetauth(config, target_iqn, auth, gateway_name):
+ # Target level authentication was introduced in ceph-iscsi config v11
+ if config['version'] > 10:
+ user = auth['user']
+ password = auth['password']
+ mutual_user = auth['mutual_user']
+ mutual_password = auth['mutual_password']
+ IscsiClient.instance(gateway_name=gateway_name).update_targetauth(target_iqn,
+ user,
+ password,
+ mutual_user,
+ mutual_password)
+
+ @staticmethod
+ def _update_targetacl(target_config, target_iqn, acl_enabled, gateway_name):
+ if not target_config or target_config['acl_enabled'] != acl_enabled:
+ targetauth_action = ('enable_acl' if acl_enabled else 'disable_acl')
+ IscsiClient.instance(gateway_name=gateway_name).update_targetacl(target_iqn,
+ targetauth_action)
+
+ @staticmethod
+ def _is_auth_equal(auth_config, auth):
+ return auth['user'] == auth_config['username'] and \
+ auth['password'] == auth_config['password'] and \
+ auth['mutual_user'] == auth_config['mutual_username'] and \
+ auth['mutual_password'] == auth_config['mutual_password']
+
+ @staticmethod
+ def _create(target_iqn, target_controls, acl_enabled,
+ auth, portals, disks, clients, groups,
+ task_progress_begin, task_progress_end, config, settings):
+ target_config = config['targets'].get(target_iqn, None)
+ TaskManager.current_task().set_progress(task_progress_begin)
+ portals_by_host = IscsiTarget._get_portals_by_host(portals)
+ n_hosts = len(portals_by_host)
+ n_disks = len(disks)
+ n_clients = len(clients)
+ n_groups = len(groups)
+ task_progress_steps = n_hosts + n_disks + n_clients + n_groups
+ task_progress_inc = 0
+ if task_progress_steps != 0:
+ task_progress_inc = int((task_progress_end - task_progress_begin) / task_progress_steps)
+ try:
+ gateway_name = portals[0]['host']
+ if not target_config:
+ IscsiClient.instance(gateway_name=gateway_name).create_target(target_iqn,
+ target_controls)
+ for host, ip_list in portals_by_host.items():
+ if not target_config or host not in target_config['portals']:
+ IscsiClient.instance(gateway_name=gateway_name).create_gateway(target_iqn,
+ host,
+ ip_list)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+
+ if not target_config or \
+ acl_enabled != target_config['acl_enabled'] or \
+ not IscsiTarget._is_auth_equal(target_config['auth'], auth):
+ if acl_enabled:
+ IscsiTarget._update_targetauth(config, target_iqn, auth, gateway_name)
+ IscsiTarget._update_targetacl(target_config, target_iqn, acl_enabled,
+ gateway_name)
+ else:
+ IscsiTarget._update_targetacl(target_config, target_iqn, acl_enabled,
+ gateway_name)
+ IscsiTarget._update_targetauth(config, target_iqn, auth, gateway_name)
+
+ for disk in disks:
+ pool = disk['pool']
+ image = disk['image']
+ image_id = '{}/{}'.format(pool, image)
+ backstore = disk['backstore']
+ wwn = disk.get('wwn')
+ lun = disk.get('lun')
+ if image_id not in config['disks']:
+ IscsiClient.instance(gateway_name=gateway_name).create_disk(pool,
+ image,
+ backstore,
+ wwn)
+ if not target_config or image_id not in target_config['disks']:
+ IscsiClient.instance(gateway_name=gateway_name).create_target_lun(target_iqn,
+ image_id,
+ lun)
+
+ controls = disk['controls']
+ d_conf_controls = {}
+ if image_id in config['disks']:
+ d_conf_controls = config['disks'][image_id]['controls']
+ disk_default_controls = settings['disk_default_controls'][backstore]
+ for old_control in d_conf_controls.keys():
+ # If control was removed, restore the default value
+ if old_control not in controls:
+ controls[old_control] = disk_default_controls[old_control]
+
+ if (image_id not in config['disks'] or d_conf_controls != controls) and controls:
+ IscsiClient.instance(gateway_name=gateway_name).reconfigure_disk(pool,
+ image,
+ controls)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ for client in clients:
+ client_iqn = client['client_iqn']
+ if not target_config or client_iqn not in target_config['clients']:
+ IscsiClient.instance(gateway_name=gateway_name).create_client(target_iqn,
+ client_iqn)
+ if not target_config or client_iqn not in target_config['clients'] or \
+ not IscsiTarget._is_auth_equal(target_config['clients'][client_iqn]['auth'],
+ client['auth']):
+ user = client['auth']['user']
+ password = client['auth']['password']
+ m_user = client['auth']['mutual_user']
+ m_password = client['auth']['mutual_password']
+ IscsiClient.instance(gateway_name=gateway_name).create_client_auth(
+ target_iqn, client_iqn, user, password, m_user, m_password)
+ for lun in client['luns']:
+ pool = lun['pool']
+ image = lun['image']
+ image_id = '{}/{}'.format(pool, image)
+ # Disks inherited from groups must be considered
+ group_disks = []
+ for group in groups:
+ if client_iqn in group['members']:
+ group_disks = ['{}/{}'.format(x['pool'], x['image'])
+ for x in group['disks']]
+ if not target_config or client_iqn not in target_config['clients'] or \
+ (image_id not in target_config['clients'][client_iqn]['luns']
+ and image_id not in group_disks):
+ IscsiClient.instance(gateway_name=gateway_name).create_client_lun(
+ target_iqn, client_iqn, image_id)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ for group in groups:
+ group_id = group['group_id']
+ members = group['members']
+ image_ids = []
+ for disk in group['disks']:
+ image_ids.append('{}/{}'.format(disk['pool'], disk['image']))
+
+ if target_config and group_id in target_config['groups']:
+ old_members = target_config['groups'][group_id]['members']
+ old_disks = target_config['groups'][group_id]['disks'].keys()
+
+ if not target_config or group_id not in target_config['groups'] or \
+ list(set(group['members']) - set(old_members)) or \
+ list(set(image_ids) - set(old_disks)):
+ IscsiClient.instance(gateway_name=gateway_name).create_group(
+ target_iqn, group_id, members, image_ids)
+ TaskManager.current_task().inc_progress(task_progress_inc)
+ if target_controls:
+ if not target_config or target_controls != target_config['controls']:
+ IscsiClient.instance(gateway_name=gateway_name).reconfigure_target(
+ target_iqn, target_controls)
+ TaskManager.current_task().set_progress(task_progress_end)
+ except RequestException as e:
+ if e.content:
+ content = json.loads(e.content)
+ content_message = content.get('message')
+ if content_message:
+ raise DashboardException(msg=content_message, component='iscsi')
+ raise DashboardException(e=e, component='iscsi')
+
+ @staticmethod
+ def _config_to_target(target_iqn, config):
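+        # Convert the flat ceph-iscsi config for one IQN into the nested
+        # target dict exposed by the REST API (portals, disks, clients,
+        # groups).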
+ target_config = config['targets'][target_iqn]
+ portals = []
+ for host, portal_config in target_config['portals'].items():
+ for portal_ip in portal_config['portal_ip_addresses']:
+ portal = {
+ 'host': host,
+ 'ip': portal_ip
+ }
+ portals.append(portal)
+ portals = IscsiTarget._sorted_portals(portals)
+ disks = []
+ for target_disk in target_config['disks']:
+ disk_config = config['disks'][target_disk]
+ disk = {
+ 'pool': disk_config['pool'],
+ 'image': disk_config['image'],
+ 'controls': disk_config['controls'],
+ 'backstore': disk_config['backstore'],
+ 'wwn': disk_config['wwn']
+ }
+ # lun_id was introduced in ceph-iscsi config v11
+ if config['version'] > 10:
+ disk['lun'] = target_config['disks'][target_disk]['lun_id']
+ disks.append(disk)
+ disks = IscsiTarget._sorted_disks(disks)
+ clients = []
+ for client_iqn, client_config in target_config['clients'].items():
+ luns = []
+ for client_lun in client_config['luns'].keys():
+ pool, image = client_lun.split('/', 1)
+ lun = {
+ 'pool': pool,
+ 'image': image
+ }
+ luns.append(lun)
+ user = client_config['auth']['username']
+ password = client_config['auth']['password']
+ mutual_user = client_config['auth']['mutual_username']
+ mutual_password = client_config['auth']['mutual_password']
+ client = {
+ 'client_iqn': client_iqn,
+ 'luns': luns,
+ 'auth': {
+ 'user': user,
+ 'password': password,
+ 'mutual_user': mutual_user,
+ 'mutual_password': mutual_password
+ }
+ }
+ clients.append(client)
+ clients = IscsiTarget._sorted_clients(clients)
+ groups = []
+ for group_id, group_config in target_config['groups'].items():
+ group_disks = []
+ for group_disk_key, _ in group_config['disks'].items():
+ pool, image = group_disk_key.split('/', 1)
+ group_disk = {
+ 'pool': pool,
+ 'image': image
+ }
+ group_disks.append(group_disk)
+ group = {
+ 'group_id': group_id,
+ 'disks': group_disks,
+ 'members': group_config['members'],
+ }
+ groups.append(group)
+ groups = IscsiTarget._sorted_groups(groups)
+ target_controls = target_config['controls']
+ acl_enabled = target_config['acl_enabled']
+ target = {
+ 'target_iqn': target_iqn,
+ 'portals': portals,
+ 'disks': disks,
+ 'clients': clients,
+ 'groups': groups,
+ 'target_controls': target_controls,
+ 'acl_enabled': acl_enabled
+ }
+ # Target level authentication was introduced in ceph-iscsi config v11
+ if config['version'] > 10:
+ target_user = target_config['auth']['username']
+ target_password = target_config['auth']['password']
+ target_mutual_user = target_config['auth']['mutual_username']
+ target_mutual_password = target_config['auth']['mutual_password']
+ target['auth'] = {
+ 'user': target_user,
+ 'password': target_password,
+ 'mutual_user': target_mutual_user,
+ 'mutual_password': target_mutual_password
+ }
+ return target
+
+ @staticmethod
+ def _is_executing(target_iqn):
+ executing_tasks, _ = TaskManager.list()
+ for t in executing_tasks:
+ if t.name.startswith('iscsi/target') and t.metadata.get('target_iqn') == target_iqn:
+ return True
+ return False
+
+ @staticmethod
+ def _set_info(target):
+ if not target['portals']:
+ return
+ target_iqn = target['target_iqn']
+ # During task execution, additional info is not available
+ if IscsiTarget._is_executing(target_iqn):
+ return
+ # If any portal is down, additional info is not available
+ for portal in target['portals']:
+ try:
+ IscsiClient.instance(gateway_name=portal['host']).ping()
+ except (IscsiGatewayDoesNotExist, RequestException):
+ return
+ gateway_name = target['portals'][0]['host']
+ try:
+ target_info = IscsiClient.instance(gateway_name=gateway_name).get_targetinfo(
+ target_iqn)
+ target['info'] = target_info
+ for client in target['clients']:
+ client_iqn = client['client_iqn']
+ client_info = IscsiClient.instance(gateway_name=gateway_name).get_clientinfo(
+ target_iqn, client_iqn)
+ client['info'] = client_info
+ except RequestException as e:
+        # Target/Client has been removed in the meantime (e.g. using gwcli)
+ if e.status_code != 404:
+ raise e
+
+ @staticmethod
+ def _sorted_portals(portals):
+ portals = portals or []
+ return sorted(portals, key=lambda p: '{}.{}'.format(p['host'], p['ip']))
+
+ @staticmethod
+ def _sorted_disks(disks):
+ disks = disks or []
+ return sorted(disks, key=lambda d: '{}.{}'.format(d['pool'], d['image']))
+
+ @staticmethod
+ def _sorted_clients(clients):
+ clients = clients or []
+ for client in clients:
+ client['luns'] = sorted(client['luns'],
+ key=lambda d: '{}.{}'.format(d['pool'], d['image']))
+ return sorted(clients, key=lambda c: c['client_iqn'])
+
+ @staticmethod
+ def _sorted_groups(groups):
+ groups = groups or []
+ for group in groups:
+ group['disks'] = sorted(group['disks'],
+ key=lambda d: '{}.{}'.format(d['pool'], d['image']))
+ group['members'] = sorted(group['members'])
+ return sorted(groups, key=lambda g: g['group_id'])
+
+ @staticmethod
+ def _get_portals_by_host(portals):
+ portals_by_host = {}
+ for portal in portals:
+ host = portal['host']
+ ip = portal['ip']
+ if host not in portals_by_host:
+ portals_by_host[host] = []
+ portals_by_host[host].append(ip)
+ return portals_by_host
+
+
+def get_available_gateway():
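+    """Return the name of the first configured gateway that answers a ping."""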
+ gateways = IscsiGatewaysConfig.get_gateways_config()['gateways']
+ if not gateways:
+ raise DashboardException(msg='There are no gateways defined',
+ code='no_gateways_defined',
+ component='iscsi')
+ for gateway in gateways:
+ try:
+ IscsiClient.instance(gateway_name=gateway).ping()
+ return gateway
+ except RequestException:
+ pass
+ raise DashboardException(msg='There are no gateways available',
+ code='no_gateways_available',
+ component='iscsi')
+
+
+def validate_rest_api(gateways):
+ for gateway in gateways:
+ try:
+ IscsiClient.instance(gateway_name=gateway).ping()
+ except RequestException:
+            raise DashboardException(msg='iSCSI REST API not available for gateway '
+ '{}'.format(gateway),
+ code='ceph_iscsi_rest_api_not_available_for_gateway',
+ component='iscsi')
+
+
+def validate_auth(auth):
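+    # Usernames must be 8-64 characters from [\w.:@_-]; passwords 12-16
+    # characters from [\w@\-_/]. Mutual credentials additionally require a
+    # regular (non-mutual) user to be set.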
+ username_regex = re.compile(r'^[\w\.:@_-]{8,64}$')
+ password_regex = re.compile(r'^[\w@\-_\/]{12,16}$')
+ result = True
+
+ if auth['user'] or auth['password']:
+ result = bool(username_regex.match(auth['user'])) and \
+ bool(password_regex.match(auth['password']))
+
+ if auth['mutual_user'] or auth['mutual_password']:
+        result = result and bool(username_regex.match(auth['mutual_user'])) and \
+            bool(password_regex.match(auth['mutual_password'])) and bool(auth['user'])
+
+ if not result:
+ raise DashboardException(msg='Bad authentication',
+ code='target_bad_auth',
+ component='iscsi')
diff --git a/src/pybind/mgr/dashboard/controllers/logging.py b/src/pybind/mgr/dashboard/controllers/logging.py
new file mode 100644
index 00000000..9c7d6de7
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/logging.py
@@ -0,0 +1,10 @@
+from . import UiApiController, BaseController, Endpoint
+from .. import logger
+
+
+@UiApiController('/logging', secure=False)
+class Logging(BaseController):
+
+ @Endpoint('POST', path='js-error')
+ def jsError(self, url, message, stack=None):
+ logger.error('frontend error (%s): %s\n %s\n', url, message, stack)
diff --git a/src/pybind/mgr/dashboard/controllers/logs.py b/src/pybind/mgr/dashboard/controllers/logs.py
new file mode 100644
index 00000000..9dc5286f
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/logs.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import collections
+
+from . import ApiController, Endpoint, BaseController, ReadPermission
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..tools import NotificationQueue
+
+
+LOG_BUFFER_SIZE = 30
+
+
+@ApiController('/logs', Scope.LOG)
+class Logs(BaseController):
+ def __init__(self):
+ super(Logs, self).__init__()
+ self._log_initialized = False
+ self.log_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
+ self.audit_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
+
+ def append_log(self, log_struct):
+ if log_struct['channel'] == 'audit':
+ self.audit_buffer.appendleft(log_struct)
+ else:
+ self.log_buffer.appendleft(log_struct)
+
+ def load_buffer(self, buf, channel_name):
+ lines = CephService.send_command(
+ 'mon', 'log last', channel=channel_name, num=LOG_BUFFER_SIZE)
+        for line in lines:
+            buf.appendleft(line)
+
+ def initialize_buffers(self):
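+        # Lazy one-time initialization: seed both buffers from the mon log
+        # and subscribe to 'clog' notifications for live updates.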
+ if not self._log_initialized:
+ self._log_initialized = True
+
+ self.load_buffer(self.log_buffer, 'cluster')
+ self.load_buffer(self.audit_buffer, 'audit')
+
+ NotificationQueue.register(self.append_log, 'clog')
+
+ @Endpoint()
+ @ReadPermission
+ def all(self):
+ self.initialize_buffers()
+ return dict(
+ clog=list(self.log_buffer),
+ audit_log=list(self.audit_buffer),
+ )
diff --git a/src/pybind/mgr/dashboard/controllers/mgr_modules.py b/src/pybind/mgr/dashboard/controllers/mgr_modules.py
new file mode 100644
index 00000000..c37225c9
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/mgr_modules.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import ApiController, RESTController, \
+ allow_empty_body
+from .. import mgr
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..services.exception import handle_send_command_error
+from ..tools import find_object_in_list, str_to_bool
+
+
+@ApiController('/mgr/module', Scope.CONFIG_OPT)
+class MgrModules(RESTController):
+ ignore_modules = ['selftest']
+
+ def list(self):
+ """
+ Get the list of managed modules.
+ :return: A list of objects with the fields 'enabled', 'name' and 'options'.
+ :rtype: list
+ """
+ result = []
+ mgr_map = mgr.get('mgr_map')
+ always_on_modules = mgr_map['always_on_modules'][mgr.release_name]
+ for module_config in mgr_map['available_modules']:
+ module_name = module_config['name']
+ if module_name not in self.ignore_modules:
+ always_on = module_name in always_on_modules
+ enabled = module_name in mgr_map['modules'] or always_on
+ result.append({
+ 'name': module_name,
+ 'enabled': enabled,
+ 'always_on': always_on,
+ 'options': self._convert_module_options(
+ module_config['module_options'])
+ })
+ return result
+
+ def get(self, module_name):
+ """
+ Retrieve the values of the persistent configuration settings.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ :return: The values of the module options.
+ :rtype: dict
+ """
+ assert self._is_module_managed(module_name)
+ options = self._get_module_options(module_name)
+ result = {}
+ for name, option in options.items():
+ result[name] = mgr.get_module_option_ex(module_name, name,
+ option['default_value'])
+ return result
+
+ @RESTController.Resource('PUT')
+ def set(self, module_name, config):
+ """
+ Set the values of the persistent configuration settings.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ :param config: The values of the module options to be stored.
+ :type config: dict
+ """
+ assert self._is_module_managed(module_name)
+ options = self._get_module_options(module_name)
+ for name in options.keys():
+ if name in config:
+ mgr.set_module_option_ex(module_name, name, config[name])
+
+ @RESTController.Resource('POST')
+ @handle_send_command_error('mgr_modules')
+ @allow_empty_body
+ def enable(self, module_name):
+ """
+ Enable the specified Ceph Mgr module.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ """
+ assert self._is_module_managed(module_name)
+ CephService.send_command(
+ 'mon', 'mgr module enable', module=module_name)
+
+ @RESTController.Resource('POST')
+ @handle_send_command_error('mgr_modules')
+ @allow_empty_body
+ def disable(self, module_name):
+ """
+ Disable the specified Ceph Mgr module.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ """
+ assert self._is_module_managed(module_name)
+ CephService.send_command(
+ 'mon', 'mgr module disable', module=module_name)
+
+ @RESTController.Resource('GET')
+ def options(self, module_name):
+ """
+ Get the module options of the specified Ceph Mgr module.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ :return: The module options as list of dicts.
+ :rtype: list
+ """
+ assert self._is_module_managed(module_name)
+ return self._get_module_options(module_name)
+
+ def _is_module_managed(self, module_name):
+ """
+ Check if the specified Ceph Mgr module is managed by this service.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ :return: Returns ``true`` if the Ceph Mgr module is managed by
+ this service, otherwise ``false``.
+ :rtype: bool
+ """
+ if module_name in self.ignore_modules:
+ return False
+ mgr_map = mgr.get('mgr_map')
+ for module_config in mgr_map['available_modules']:
+ if module_name == module_config['name']:
+ return True
+ return False
+
+ def _get_module_config(self, module_name):
+ """
+ Helper function to get detailed module configuration.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+        :return: The module information, e.g. the module name, the 'can_run'
+        flag, the error string and the available module options.
+ :rtype: dict or None
+ """
+ mgr_map = mgr.get('mgr_map')
+ return find_object_in_list('name', module_name,
+ mgr_map['available_modules'])
+
+ def _get_module_options(self, module_name):
+ """
+ Helper function to get the module options.
+ :param module_name: The name of the Ceph Mgr module.
+ :type module_name: str
+ :return: The module options.
+ :rtype: dict
+ """
+ options = self._get_module_config(module_name)['module_options']
+ return self._convert_module_options(options)
+
+ def _convert_module_options(self, options):
+ # Workaround a possible bug in the Ceph Mgr implementation.
+ # Various fields (e.g. default_value, min, max) are always
+ # returned as a string.
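+        # Illustrative example (hypothetical values): a raw option such as
+        #   {'type': 'int', 'default_value': '5', 'min': '1', 'max': ''}
+        # is converted in place to
+        #   {'type': 'int', 'default_value': 5, 'min': 1, 'max': ''}.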
+ for option in options.values():
+ if option['type'] == 'str':
+ if option['default_value'] == 'None': # This is Python None
+ option['default_value'] = ''
+ elif option['type'] == 'bool':
+ if option['default_value'] == '':
+ option['default_value'] = False
+ else:
+ option['default_value'] = str_to_bool(
+ option['default_value'])
+ elif option['type'] == 'float':
+ for name in ['default_value', 'min', 'max']:
+ if option[name]: # Skip empty entries
+ option[name] = float(option[name])
+ elif option['type'] in ['uint', 'int', 'size', 'secs']:
+ for name in ['default_value', 'min', 'max']:
+ if option[name]: # Skip empty entries
+ option[name] = int(option[name])
+ return options
diff --git a/src/pybind/mgr/dashboard/controllers/monitor.py b/src/pybind/mgr/dashboard/controllers/monitor.py
new file mode 100644
index 00000000..d4512fcf
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/monitor.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+
+from . import ApiController, Endpoint, BaseController, ReadPermission
+from .. import mgr
+from ..security import Scope
+
+
+@ApiController('/monitor', Scope.MONITOR)
+class Monitor(BaseController):
+ @Endpoint()
+ @ReadPermission
+ def __call__(self):
+ in_quorum, out_quorum = [], []
+
+ counters = ['mon.num_sessions']
+
+ mon_status_json = mgr.get("mon_status")
+ mon_status = json.loads(mon_status_json['json'])
+
+ for mon in mon_status["monmap"]["mons"]:
+ mon["stats"] = {}
+ for counter in counters:
+ data = mgr.get_counter("mon", mon["name"], counter)
+ if data is not None:
+ mon["stats"][counter.split(".")[1]] = data[counter]
+ else:
+ mon["stats"][counter.split(".")[1]] = []
+ if mon["rank"] in mon_status["quorum"]:
+ in_quorum.append(mon)
+ else:
+ out_quorum.append(mon)
+
+ return {
+ 'mon_status': mon_status,
+ 'in_quorum': in_quorum,
+ 'out_quorum': out_quorum
+ }
diff --git a/src/pybind/mgr/dashboard/controllers/nfsganesha.py b/src/pybind/mgr/dashboard/controllers/nfsganesha.py
new file mode 100644
index 00000000..b9599d72
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/nfsganesha.py
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from functools import partial
+
+import cherrypy
+import cephfs
+
+from . import ApiController, RESTController, UiApiController, BaseController, \
+ Endpoint, Task, ReadPermission, ControllerDoc, EndpointDoc
+from .. import logger
+from ..security import Scope
+from ..services.cephfs import CephFS
+from ..services.cephx import CephX
+from ..services.exception import serialize_dashboard_exception
+from ..services.ganesha import Ganesha, GaneshaConf, NFSException
+from ..services.rgw_client import RgwClient
+
+
+# documentation helpers
+EXPORT_SCHEMA = {
+ 'export_id': (int, 'Export ID'),
+ 'path': (str, 'Export path'),
+ 'cluster_id': (str, 'Cluster identifier'),
+ 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
+ 'pseudo': (str, 'Pseudo FS path'),
+ 'tag': (str, 'NFSv3 export tag'),
+ 'access_type': (str, 'Export access type'),
+ 'squash': (str, 'Export squash policy'),
+ 'security_label': (str, 'Security label'),
+ 'protocols': ([int], 'List of protocol types'),
+ 'transports': ([str], 'List of transport types'),
+ 'fsal': ({
+ 'name': (str, 'name of FSAL'),
+ 'user_id': (str, 'CephX user id', True),
+ 'filesystem': (str, 'CephFS filesystem ID', True),
+ 'sec_label_xattr': (str, 'Name of xattr for security label', True),
+ 'rgw_user_id': (str, 'RGW user id', True)
+ }, 'FSAL configuration'),
+ 'clients': ([{
+ 'addresses': ([str], 'list of IP addresses'),
+ 'access_type': (str, 'Client access type'),
+ 'squash': (str, 'Client squash policy')
+ }], 'List of client configurations'),
+}
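+# Note: the schema entries above are assumed to follow the EndpointDoc
+# convention of ``(type, description)`` tuples, with an optional third
+# ``True`` element marking the field as optional.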
+
+
+CREATE_EXPORT_SCHEMA = {
+ 'path': (str, 'Export path'),
+ 'cluster_id': (str, 'Cluster identifier'),
+ 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
+ 'pseudo': (str, 'Pseudo FS path'),
+ 'tag': (str, 'NFSv3 export tag'),
+ 'access_type': (str, 'Export access type'),
+ 'squash': (str, 'Export squash policy'),
+ 'security_label': (str, 'Security label'),
+ 'protocols': ([int], 'List of protocol types'),
+ 'transports': ([str], 'List of transport types'),
+ 'fsal': ({
+ 'name': (str, 'name of FSAL'),
+ 'user_id': (str, 'CephX user id', True),
+ 'filesystem': (str, 'CephFS filesystem ID', True),
+ 'sec_label_xattr': (str, 'Name of xattr for security label', True),
+ 'rgw_user_id': (str, 'RGW user id', True)
+ }, 'FSAL configuration'),
+ 'clients': ([{
+ 'addresses': ([str], 'list of IP addresses'),
+ 'access_type': (str, 'Client access type'),
+ 'squash': (str, 'Client squash policy')
+ }], 'List of client configurations'),
+ 'reload_daemons': (bool,
+ 'Trigger reload of NFS-Ganesha daemons configuration',
+ True)
+}
+
+
+# pylint: disable=not-callable
+def NfsTask(name, metadata, wait_for):
+ def composed_decorator(func):
+ return Task("nfs/{}".format(name), metadata, wait_for,
+ partial(serialize_dashboard_exception,
+ include_http_status=True))(func)
+ return composed_decorator
+
+
+@ApiController('/nfs-ganesha', Scope.NFS_GANESHA)
+@ControllerDoc("NFS-Ganesha Management API", "NFS-Ganesha")
+class NFSGanesha(RESTController):
+
+ @EndpointDoc("Status of NFS-Ganesha management feature",
+ responses={200: {
+ 'available': (bool, "Is API available?"),
+ 'message': (str, "Error message")
+ }})
+ @Endpoint()
+ @ReadPermission
+ def status(self):
+ status = {'available': True, 'message': None}
+ try:
+ Ganesha.get_ganesha_clusters()
+ except NFSException as e:
+ status['message'] = str(e)
+ status['available'] = False
+
+ return status
+
+
+@ApiController('/nfs-ganesha/export', Scope.NFS_GANESHA)
+@ControllerDoc(group="NFS-Ganesha")
+class NFSGaneshaExports(RESTController):
+ RESOURCE_ID = "cluster_id/export_id"
+
+ @EndpointDoc("List all NFS-Ganesha exports",
+ responses={200: [EXPORT_SCHEMA]})
+ def list(self):
+ result = []
+ for cluster_id in Ganesha.get_ganesha_clusters():
+ result.extend(
+ [export.to_dict()
+ for export in GaneshaConf.instance(cluster_id).list_exports()])
+ return result
+
+ @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
+ 'cluster_id': '{cluster_id}'}, 2.0)
+ @EndpointDoc("Creates a new NFS-Ganesha export",
+ parameters=CREATE_EXPORT_SCHEMA,
+ responses={201: EXPORT_SCHEMA})
+ def create(self, path, cluster_id, daemons, pseudo, tag, access_type,
+ squash, security_label, protocols, transports, fsal, clients,
+ reload_daemons=True):
+ if fsal['name'] not in Ganesha.fsals_available():
+ raise NFSException("Cannot create this export. "
+ "FSAL '{}' cannot be managed by the dashboard."
+ .format(fsal['name']))
+
+ ganesha_conf = GaneshaConf.instance(cluster_id)
+ ex_id = ganesha_conf.create_export({
+ 'path': path,
+ 'pseudo': pseudo,
+ 'cluster_id': cluster_id,
+ 'daemons': daemons,
+ 'tag': tag,
+ 'access_type': access_type,
+ 'squash': squash,
+ 'security_label': security_label,
+ 'protocols': protocols,
+ 'transports': transports,
+ 'fsal': fsal,
+ 'clients': clients
+ })
+ if reload_daemons:
+ ganesha_conf.reload_daemons(daemons)
+ return ganesha_conf.get_export(ex_id).to_dict()
+
+ @EndpointDoc("Get an NFS-Ganesha export",
+ parameters={
+ 'cluster_id': (str, 'Cluster identifier'),
+ 'export_id': (int, "Export ID")
+ },
+ responses={200: EXPORT_SCHEMA})
+ def get(self, cluster_id, export_id):
+ export_id = int(export_id)
+ ganesha_conf = GaneshaConf.instance(cluster_id)
+ if not ganesha_conf.has_export(export_id):
+ raise cherrypy.HTTPError(404)
+ return ganesha_conf.get_export(export_id).to_dict()
+
+ @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
+ 2.0)
+ @EndpointDoc("Updates an NFS-Ganesha export",
+ parameters=dict(export_id=(int, "Export ID"),
+ **CREATE_EXPORT_SCHEMA),
+ responses={200: EXPORT_SCHEMA})
+ def set(self, cluster_id, export_id, path, daemons, pseudo, tag, access_type,
+ squash, security_label, protocols, transports, fsal, clients,
+ reload_daemons=True):
+ export_id = int(export_id)
+ ganesha_conf = GaneshaConf.instance(cluster_id)
+
+ if not ganesha_conf.has_export(export_id):
+ raise cherrypy.HTTPError(404)
+
+ if fsal['name'] not in Ganesha.fsals_available():
+ raise NFSException("Cannot make modifications to this export. "
+ "FSAL '{}' cannot be managed by the dashboard."
+ .format(fsal['name']))
+
+ old_export = ganesha_conf.update_export({
+ 'export_id': export_id,
+ 'path': path,
+ 'cluster_id': cluster_id,
+ 'daemons': daemons,
+ 'pseudo': pseudo,
+ 'tag': tag,
+ 'access_type': access_type,
+ 'squash': squash,
+ 'security_label': security_label,
+ 'protocols': protocols,
+ 'transports': transports,
+ 'fsal': fsal,
+ 'clients': clients
+ })
+ daemons = list(daemons)
+ for d_id in old_export.daemons:
+ if d_id not in daemons:
+ daemons.append(d_id)
+ if reload_daemons:
+ ganesha_conf.reload_daemons(daemons)
+ return ganesha_conf.get_export(export_id).to_dict()
+
+ @NfsTask('delete', {'cluster_id': '{cluster_id}',
+ 'export_id': '{export_id}'}, 2.0)
+ @EndpointDoc("Deletes an NFS-Ganesha export",
+ parameters={
+ 'cluster_id': (str, 'Cluster identifier'),
+ 'export_id': (int, "Export ID"),
+ 'reload_daemons': (bool,
+ 'Trigger reload of NFS-Ganesha daemons'
+ ' configuration',
+ True)
+ })
+ def delete(self, cluster_id, export_id, reload_daemons=True):
+ export_id = int(export_id)
+ ganesha_conf = GaneshaConf.instance(cluster_id)
+
+ if not ganesha_conf.has_export(export_id):
+ raise cherrypy.HTTPError(404)
+
+ export = ganesha_conf.remove_export(export_id)
+ if reload_daemons:
+ ganesha_conf.reload_daemons(export.daemons)
+
+
+@ApiController('/nfs-ganesha/daemon', Scope.NFS_GANESHA)
+@ControllerDoc(group="NFS-Ganesha")
+class NFSGaneshaService(RESTController):
+
+ @EndpointDoc("List NFS-Ganesha daemons information",
+ responses={200: [{
+ 'daemon_id': (str, 'Daemon identifier'),
+ 'cluster_id': (str, 'Cluster identifier'),
+ 'status': (int,
+                           'Status of daemon (1=RUNNING, 0=STOPPED, -1=ERROR)',
+ True),
+ 'desc': (str, 'Error description (if status==-1)', True)
+ }]})
+ def list(self):
+ status_dict = Ganesha.get_daemons_status()
+ if status_dict:
+ return [
+ {
+ 'daemon_id': daemon_id,
+ 'cluster_id': cluster_id,
+ 'status': status_dict[cluster_id][daemon_id]['status'],
+ 'desc': status_dict[cluster_id][daemon_id]['desc']
+ }
+ for cluster_id in status_dict
+ for daemon_id in status_dict[cluster_id]
+ ]
+
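+        # Fallback (when no daemon status is available): list only the
+        # daemons defined in each cluster's configuration, without status.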
+ result = []
+ for cluster_id in Ganesha.get_ganesha_clusters():
+ result.extend(
+ [{'daemon_id': daemon_id, 'cluster_id': cluster_id}
+ for daemon_id in GaneshaConf.instance(cluster_id).list_daemons()])
+ return result
+
+
+@UiApiController('/nfs-ganesha', Scope.NFS_GANESHA)
+class NFSGaneshaUi(BaseController):
+ @Endpoint('GET', '/cephx/clients')
+ @ReadPermission
+ def cephx_clients(self):
+        return list(CephX.list_clients())
+
+ @Endpoint('GET', '/fsals')
+ @ReadPermission
+ def fsals(self):
+ return Ganesha.fsals_available()
+
+ @Endpoint('GET', '/lsdir')
+ @ReadPermission
+ def lsdir(self, root_dir=None, depth=1): # pragma: no cover
+ if root_dir is None:
+ root_dir = "/"
+ depth = int(depth)
+ if depth > 5:
+ logger.warning("[NFS] Limiting depth to maximum value of 5: "
+ "input depth=%s", depth)
+ depth = 5
+ root_dir = '{}/'.format(root_dir) \
+ if not root_dir.endswith('/') else root_dir
+
+ try:
+ cfs = CephFS()
+ paths = cfs.get_dir_list(root_dir, depth)
+ paths = [p[:-1] for p in paths if p != root_dir]
+ return {'paths': paths}
+ except (cephfs.ObjectNotFound, cephfs.PermissionError):
+ return {'paths': []}
+
+ @Endpoint('GET', '/cephfs/filesystems')
+ @ReadPermission
+ def filesystems(self):
+ return CephFS.list_filesystems()
+
+ @Endpoint('GET', '/rgw/buckets')
+ @ReadPermission
+ def buckets(self, user_id=None):
+ return RgwClient.instance(user_id).get_buckets()
+
+ @Endpoint('GET', '/clusters')
+ @ReadPermission
+ def clusters(self):
+ return Ganesha.get_ganesha_clusters()
diff --git a/src/pybind/mgr/dashboard/controllers/osd.py b/src/pybind/mgr/dashboard/controllers/osd.py
new file mode 100644
index 00000000..a971a512
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/osd.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from mgr_util import get_most_recent_rate
+
+from . import ApiController, RESTController, UpdatePermission, \
+ allow_empty_body
+from .. import mgr, logger
+from ..security import Scope
+from ..services.ceph_service import CephService, SendCommandError
+from ..services.exception import handle_send_command_error
+from ..tools import str_to_bool
+try:
+ from typing import Dict, List, Any, Union # pylint: disable=unused-import
+except ImportError:
+ pass # For typing only
+
+
+@ApiController('/osd', Scope.OSD)
+class Osd(RESTController):
+ def list(self):
+ osds = self.get_osd_map()
+
+ # Extending by osd stats information
+ for stat in mgr.get('osd_stats')['osd_stats']:
+ if stat['osd'] in osds:
+ osds[stat['osd']]['osd_stats'] = stat
+
+ # Extending by osd node information
+ nodes = mgr.get('osd_map_tree')['nodes']
+ for node in nodes:
+ if node['type'] == 'osd' and node['id'] in osds:
+ osds[node['id']]['tree'] = node
+
+ # Extending by osd parent node information
+ for host in [n for n in nodes if n['type'] == 'host']:
+ for osd_id in host['children']:
+ if osd_id >= 0 and osd_id in osds:
+ osds[osd_id]['host'] = host
+
+ # Extending by osd histogram data
+ for osd_id, osd in osds.items():
+ osd['stats'] = {}
+ osd['stats_history'] = {}
+ osd_spec = str(osd_id)
+ if 'osd' not in osd:
+ continue
+ for stat in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
+ prop = stat.split('.')[1]
+ rates = CephService.get_rates('osd', osd_spec, stat)
+ osd['stats'][prop] = get_most_recent_rate(rates)
+ osd['stats_history'][prop] = rates
+ # Gauge stats
+ for stat in ['osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used']:
+ osd['stats'][stat.split('.')[1]] = mgr.get_latest('osd', osd_spec, stat)
+
+ return list(osds.values())
+
+ @staticmethod
+ def get_osd_map(svc_id=None):
+ # type: (Union[int, None]) -> Dict[int, Union[Dict[str, Any], Any]]
+ def add_id(osd):
+ osd['id'] = osd['osd']
+ return osd
+ resp = {
+ osd['osd']: add_id(osd)
+ for osd in mgr.get('osd_map')['osds'] if svc_id is None or osd['osd'] == int(svc_id)
+ }
+ return resp if svc_id is None else resp[int(svc_id)]
+
+ @handle_send_command_error('osd')
+ def get(self, svc_id):
+ """
+ Returns collected data about an OSD.
+
+        :return: Returns the requested data. The `histogram` key may contain a
+        string with an error that occurred when the OSD is down.
+ """
+ try:
+ histogram = CephService.send_command('osd', srv_spec=svc_id,
+ prefix='perf histogram dump')
+ except SendCommandError as e:
+ if 'osd down' in str(e):
+ histogram = str(e)
+ else:
+ raise
+
+ return {
+ 'osd_map': self.get_osd_map(svc_id),
+ 'osd_metadata': mgr.get_metadata('osd', svc_id),
+ 'histogram': histogram,
+ }
+
+ @RESTController.Resource('POST', query_params=['deep'])
+ @UpdatePermission
+ @allow_empty_body
+ def scrub(self, svc_id, deep=False):
+ api_scrub = "osd deep-scrub" if str_to_bool(deep) else "osd scrub"
+ CephService.send_command("mon", api_scrub, who=svc_id)
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def mark_out(self, svc_id):
+ CephService.send_command('mon', 'osd out', ids=[svc_id])
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def mark_in(self, svc_id):
+ CephService.send_command('mon', 'osd in', ids=[svc_id])
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def mark_down(self, svc_id):
+ CephService.send_command('mon', 'osd down', ids=[svc_id])
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def reweight(self, svc_id, weight):
+ """
+ Reweights the OSD temporarily.
+
+ Note that ‘ceph osd reweight’ is not a persistent setting. When an OSD
+ gets marked out, the osd weight will be set to 0. When it gets marked
+ in again, the weight will be changed to 1.
+
+        Because of this, ‘ceph osd reweight’ is a temporary solution. You should
+ only use it to keep your cluster running while you’re ordering more
+ hardware.
+
+ - Craig Lewis (http://lists.ceph.com/pipermail/ceph-users-ceph.com/2014-June/040967.html)
+ """
+ CephService.send_command(
+ 'mon',
+ 'osd reweight',
+ id=int(svc_id),
+ weight=float(weight))
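+        # Example (hypothetical values): reweighting OSD 0 to 0.8 results in
+        # `ceph osd reweight 0 0.8` being sent to the monitors.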
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def mark_lost(self, svc_id):
+ """
+ Note: osd must be marked `down` before marking lost.
+ """
+ CephService.send_command(
+ 'mon',
+ 'osd lost',
+ id=int(svc_id),
+ yes_i_really_mean_it=True)
+
+ def create(self, uuid=None, svc_id=None):
+ """
+        :param uuid: Will be set automatically if the OSD starts up.
+        :param svc_id: The ID is only used if a valid uuid is given.
+        :return: A dict with the command result, the OSD ID and the uuid.
+ """
+ result = CephService.send_command(
+ 'mon', 'osd create', id=int(svc_id), uuid=uuid)
+ return {
+ 'result': result,
+ 'svc_id': int(svc_id),
+ 'uuid': uuid,
+ }
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def purge(self, svc_id):
+ """
+ Note: osd must be marked `down` before removal.
+ """
+ CephService.send_command('mon', 'osd purge-actual', id=int(svc_id),
+ yes_i_really_mean_it=True)
+
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def destroy(self, svc_id):
+ """
+ Mark osd as being destroyed. Keeps the ID intact (allowing reuse), but
+ removes cephx keys, config-key data and lockbox keys, rendering data
+ permanently unreadable.
+
+ The osd must be marked down before being destroyed.
+ """
+ CephService.send_command(
+ 'mon', 'osd destroy-actual', id=int(svc_id), yes_i_really_mean_it=True)
+
+ @RESTController.Resource('GET')
+ def safe_to_destroy(self, svc_id):
+ """
+ :type svc_id: int|[int]
+ """
+ if not isinstance(svc_id, list):
+ svc_id = [svc_id]
+ svc_id = list(map(str, svc_id))
+ try:
+ result = CephService.send_command(
+ 'mon', 'osd safe-to-destroy', ids=svc_id, target=('mgr', ''))
+ result['is_safe_to_destroy'] = set(result['safe_to_destroy']) == set(map(int, svc_id))
+ return result
+
+ except SendCommandError as e:
+ return {
+ 'message': str(e),
+ 'is_safe_to_destroy': False,
+ }
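+        # Illustrative result (hypothetical): for svc_id=0 the mon may answer
+        # {'safe_to_destroy': [0], ...}; 'is_safe_to_destroy' is True only
+        # when every requested OSD appears in that list.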
+
+
+@ApiController('/osd/flags', Scope.OSD)
+class OsdFlagsController(RESTController):
+ @staticmethod
+ def _osd_flags():
+ enabled_flags = mgr.get('osd_map')['flags_set']
+ if 'pauserd' in enabled_flags and 'pausewr' in enabled_flags:
+ # 'pause' is set by calling `ceph osd set pause` and unset by
+ # calling `set osd unset pause`, but `ceph osd dump | jq '.flags'`
+ # will contain 'pauserd,pausewr' if pause is set.
+ # Let's pretend to the API that 'pause' is in fact a proper flag.
+ enabled_flags = list(
+ set(enabled_flags) - {'pauserd', 'pausewr'} | {'pause'})
+ return sorted(enabled_flags)
+
+ def list(self):
+ return self._osd_flags()
+
+ def bulk_set(self, flags):
+ """
+        The `recovery_deletes`, `sortbitwise` and `pglog_hardlimit` flags cannot be unset.
+        `purged_snapshots` cannot even be set. It is therefore required to
+        include at least the three flags that cannot be unset for a
+        successful operation.
+ """
+ assert isinstance(flags, list)
+
+ enabled_flags = set(self._osd_flags())
+ data = set(flags)
+ added = data - enabled_flags
+ removed = enabled_flags - data
+ for flag in added:
+ CephService.send_command('mon', 'osd set', '', key=flag)
+ for flag in removed:
+ CephService.send_command('mon', 'osd unset', '', key=flag)
+ logger.info('Changed OSD flags: added=%s removed=%s', added, removed)
+
+ return sorted(enabled_flags - removed | added)
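+        # Worked example (hypothetical state): with enabled flags
+        # {'noout', 'sortbitwise'} and flags=['sortbitwise', 'noin'],
+        # 'noin' is set, 'noout' is unset, and ['noin', 'sortbitwise']
+        # is returned.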
diff --git a/src/pybind/mgr/dashboard/controllers/perf_counters.py b/src/pybind/mgr/dashboard/controllers/perf_counters.py
new file mode 100644
index 00000000..158641ca
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/perf_counters.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import cherrypy
+
+from . import ApiController, RESTController
+from .. import mgr
+from ..security import Scope
+from ..services.ceph_service import CephService
+
+
+class PerfCounter(RESTController):
+ service_type = None # type: str
+
+ def get(self, service_id):
+ schema_dict = mgr.get_perf_schema(self.service_type, str(service_id))
+ try:
+ schema = schema_dict["{}.{}".format(self.service_type, service_id)]
+ except KeyError as e:
+ raise cherrypy.HTTPError(404, "{0} not found".format(e))
+ counters = []
+
+ for key, value in sorted(schema.items()):
+ counter = dict()
+ counter['name'] = str(key)
+ counter['description'] = value['description']
+ # pylint: disable=W0212
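+            # Counter-type stats are exposed as rates, everything else
+            # (gauge-like stats) as the most recent raw value.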
+ if mgr._stattype_to_str(value['type']) == 'counter':
+ counter['value'] = CephService.get_rate(
+ self.service_type, service_id, key)
+ counter['unit'] = mgr._unit_to_str(value['units'])
+ else:
+ counter['value'] = mgr.get_latest(
+ self.service_type, service_id, key)
+ counter['unit'] = ''
+ counters.append(counter)
+
+ return {
+ 'service': {
+ 'type': self.service_type,
+ 'id': str(service_id)
+ },
+ 'counters': counters
+ }
+
+
+@ApiController('perf_counters/mds', Scope.CEPHFS)
+class MdsPerfCounter(PerfCounter):
+ service_type = 'mds'
+
+
+@ApiController('perf_counters/mon', Scope.MONITOR)
+class MonPerfCounter(PerfCounter):
+ service_type = 'mon'
+
+
+@ApiController('perf_counters/osd', Scope.OSD)
+class OsdPerfCounter(PerfCounter):
+ service_type = 'osd'
+
+
+@ApiController('perf_counters/rgw', Scope.RGW)
+class RgwPerfCounter(PerfCounter):
+ service_type = 'rgw'
+
+
+@ApiController('perf_counters/rbd-mirror', Scope.RBD_MIRRORING)
+class RbdMirrorPerfCounter(PerfCounter):
+ service_type = 'rbd-mirror'
+
+
+@ApiController('perf_counters/mgr', Scope.MANAGER)
+class MgrPerfCounter(PerfCounter):
+ service_type = 'mgr'
+
+
+@ApiController('perf_counters/tcmu-runner', Scope.ISCSI)
+class TcmuRunnerPerfCounter(PerfCounter):
+ service_type = 'tcmu-runner'
+
+
+@ApiController('perf_counters')
+class PerfCounters(RESTController):
+ def list(self):
+ return mgr.get_all_perf_counters()
diff --git a/src/pybind/mgr/dashboard/controllers/pool.py b/src/pybind/mgr/dashboard/controllers/pool.py
new file mode 100644
index 00000000..c6919fc2
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/pool.py
@@ -0,0 +1,234 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import time
+import cherrypy
+
+from . import ApiController, RESTController, Endpoint, ReadPermission, Task
+from .. import mgr
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..services.rbd import RbdConfiguration
+from ..services.exception import handle_send_command_error
+from ..tools import str_to_bool, TaskManager
+
+
+def pool_task(name, metadata, wait_for=2.0):
+ return Task("pool/{}".format(name), metadata, wait_for)
+
+
+@ApiController('/pool', Scope.POOL)
+class Pool(RESTController):
+
+ @staticmethod
+ def _serialize_pool(pool, attrs):
+ if not attrs or not isinstance(attrs, list):
+ attrs = pool.keys()
+
+ crush_rules = {r['rule_id']: r["rule_name"] for r in mgr.get('osd_map_crush')['rules']}
+
+ res = {}
+ for attr in attrs:
+ if attr not in pool:
+ continue
+ if attr == 'type':
+ res[attr] = {1: 'replicated', 3: 'erasure'}[pool[attr]]
+ elif attr == 'crush_rule':
+ res[attr] = crush_rules[pool[attr]]
+ elif attr == 'application_metadata':
+ res[attr] = list(pool[attr].keys())
+ else:
+ res[attr] = pool[attr]
+
+ # pool_name is mandatory
+ res['pool_name'] = pool['pool_name']
+ return res
+
+ @classmethod
+ def _pool_list(cls, attrs=None, stats=False):
+ if attrs:
+ attrs = attrs.split(',')
+
+ if str_to_bool(stats):
+ pools = CephService.get_pool_list_with_stats()
+ else:
+ pools = CephService.get_pool_list()
+
+ return [cls._serialize_pool(pool, attrs) for pool in pools]
+
+ def list(self, attrs=None, stats=False):
+ return self._pool_list(attrs, stats)
+
+ @classmethod
+ def _get(cls, pool_name, attrs=None, stats=False):
+ # type: (str, str, bool) -> dict
+ pools = cls._pool_list(attrs, stats)
+ pool = [pool for pool in pools if pool['pool_name'] == pool_name]
+ if not pool:
+ raise cherrypy.NotFound('No such pool')
+ return pool[0]
+
+ def get(self, pool_name, attrs=None, stats=False):
+ # type: (str, str, bool) -> dict
+ pool = self._get(pool_name, attrs, stats)
+ pool['configuration'] = RbdConfiguration(pool_name).list()
+ return pool
+
+ @pool_task('delete', ['{pool_name}'])
+ @handle_send_command_error('pool')
+ def delete(self, pool_name):
+ return CephService.send_command('mon', 'osd pool delete', pool=pool_name, pool2=pool_name,
+ yes_i_really_really_mean_it=True)
+
+ @pool_task('edit', ['{pool_name}'])
+ def set(self, pool_name, flags=None, application_metadata=None, configuration=None, **kwargs):
+ self._set_pool_values(pool_name, application_metadata, flags, True, kwargs)
+ if kwargs.get('pool'):
+ pool_name = kwargs['pool']
+ RbdConfiguration(pool_name).set_configuration(configuration)
+ self._wait_for_pgs(pool_name)
+
+ @pool_task('create', {'pool_name': '{pool}'})
+ @handle_send_command_error('pool')
+ def create(self, pool, pg_num, pool_type, erasure_code_profile=None, flags=None,
+ application_metadata=None, rule_name=None, configuration=None, **kwargs):
+ ecp = erasure_code_profile if erasure_code_profile else None
+ CephService.send_command('mon', 'osd pool create', pool=pool, pg_num=int(pg_num),
+ pgp_num=int(pg_num), pool_type=pool_type, erasure_code_profile=ecp,
+ rule=rule_name)
+ self._set_pool_values(pool, application_metadata, flags, False, kwargs)
+ RbdConfiguration(pool).set_configuration(configuration)
+ self._wait_for_pgs(pool)
+
+ def _set_pool_values(self, pool, application_metadata, flags, update_existing, kwargs):
+ update_name = False
+ current_pool = self._get(pool)
+ if update_existing and kwargs.get('compression_mode') == 'unset':
+ self._prepare_compression_removal(current_pool.get('options'), kwargs)
+ if flags and 'ec_overwrites' in flags:
+ CephService.send_command('mon', 'osd pool set', pool=pool, var='allow_ec_overwrites',
+ val='true')
+ if application_metadata is not None:
+ def set_app(what, app):
+ CephService.send_command('mon', 'osd pool application ' + what, pool=pool, app=app,
+ yes_i_really_mean_it=True)
+ if update_existing:
+ original_app_metadata = set(
+ current_pool.get('application_metadata'))
+ else:
+ original_app_metadata = set()
+
+ for app in original_app_metadata - set(application_metadata):
+ set_app('disable', app)
+ for app in set(application_metadata) - original_app_metadata:
+ set_app('enable', app)
+
+ def set_key(key, value):
+ CephService.send_command('mon', 'osd pool set', pool=pool, var=key, val=str(value))
+
+ for key, value in kwargs.items():
+ if key == 'pool':
+ update_name = True
+ destpool = value
+ else:
+ set_key(key, value)
+ if key == 'pg_num':
+ set_key('pgp_num', value)
+ if update_name:
+ CephService.send_command('mon', 'osd pool rename', srcpool=pool, destpool=destpool)
+
+ def _prepare_compression_removal(self, options, kwargs):
+ """
+        Presets the payload with values that will remove the compression
+        attributes when they are no longer needed.
+
+        If compression is not needed anymore, the dashboard will send
+        'compression_mode' with the value 'unset'.
+
+        :param options: All options currently set for the pool.
+        :param kwargs: Payload of the PUT / POST call.
+ """
+ if options is not None:
+ def reset_arg(arg, value):
+ if options.get(arg):
+ kwargs[arg] = value
+ for arg in ['compression_min_blob_size', 'compression_max_blob_size',
+ 'compression_required_ratio']:
+ reset_arg(arg, '0')
+ reset_arg('compression_algorithm', 'unset')
+
+ @classmethod
+ def _wait_for_pgs(cls, pool_name):
+ """
+        Keep the task waiting until all PG changes are complete.
+        :param pool_name: The name of the pool.
+        :type pool_name: str
+ """
+ current_pool = cls._get(pool_name)
+ initial_pgs = int(current_pool['pg_placement_num']) + int(current_pool['pg_num'])
+ cls._pg_wait_loop(current_pool, initial_pgs)
+
+ @classmethod
+ def _pg_wait_loop(cls, pool, initial_pgs):
+ """
+        Checks whether all PG changes have completed; if not, it calls
+        itself until all changes have completed.
+ :param pool: The dict that represents a pool.
+ :type pool: dict
+ :param initial_pgs: The pg and pg_num count before any change happened.
+ :type initial_pgs: int
+ """
+ if 'pg_num_target' in pool:
+ target = int(pool['pg_num_target']) + int(pool['pg_placement_num_target'])
+ current = int(pool['pg_placement_num']) + int(pool['pg_num'])
+ if current != target:
+ max_diff = abs(target - initial_pgs)
+ diff = max_diff - abs(target - current)
+ percentage = int(round(diff / float(max_diff) * 100))
+ TaskManager.current_task().set_progress(percentage)
+ time.sleep(4)
+ cls._pg_wait_loop(cls._get(pool['pool_name']), initial_pgs)
+
+ @RESTController.Resource()
+ @ReadPermission
+ def configuration(self, pool_name):
+ return RbdConfiguration(pool_name).list()
+
+ @Endpoint()
+ @ReadPermission
+ def _info(self, pool_name=''):
+ # type: (str) -> dict
+ """Used by the create-pool dialog"""
+
+ def rules(pool_type):
+ return [r
+ for r in mgr.get('osd_map_crush')['rules']
+ if r['type'] == pool_type]
+
+ def all_bluestore():
+ return all(o['osd_objectstore'] == 'bluestore'
+ for o in mgr.get('osd_metadata').values())
+
+ def get_config_option_enum(conf_name):
+ return [[v for v in o['enum_values'] if len(v) > 0]
+ for o in mgr.get('config_options')['options']
+ if o['name'] == conf_name][0]
+
+ mgr_config = mgr.get('config')
+ result = {
+ "pool_names": [p['pool_name'] for p in self._pool_list()],
+ "crush_rules_replicated": rules(1),
+ "crush_rules_erasure": rules(3),
+ "is_all_bluestore": all_bluestore(),
+ "osd_count": len(mgr.get('osd_map')['osds']),
+ "bluestore_compression_algorithm": mgr_config['bluestore_compression_algorithm'],
+ "compression_algorithms": get_config_option_enum('bluestore_compression_algorithm'),
+ "compression_modes": get_config_option_enum('bluestore_compression_mode'),
+ "pg_autoscale_default_mode": mgr_config['osd_pool_default_pg_autoscale_mode'],
+ "pg_autoscale_modes": get_config_option_enum('osd_pool_default_pg_autoscale_mode'),
+ }
+
+ if pool_name:
+ result['pool_options'] = RbdConfiguration(pool_name).list()
+
+ return result
diff --git a/src/pybind/mgr/dashboard/controllers/prometheus.py b/src/pybind/mgr/dashboard/controllers/prometheus.py
new file mode 100644
index 00000000..40333541
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/prometheus.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from datetime import datetime
+import json
+import requests
+
+from . import Controller, ApiController, BaseController, RESTController, Endpoint
+from ..security import Scope
+from ..settings import Settings
+from ..exceptions import DashboardException
+
+
+@Controller('/api/prometheus_receiver', secure=False)
+class PrometheusReceiver(BaseController):
+    ''' The receiver is the endpoint to which the Alertmanager pushes its alert notifications (reports). '''
+ notifications = []
+
+ @Endpoint('POST', path='/')
+ def fetch_alert(self, **notification):
+ notification['notified'] = datetime.now().isoformat()
+ notification['id'] = str(len(self.notifications))
+ self.notifications.append(notification)
+
+
+class PrometheusRESTController(RESTController):
+ def prometheus_proxy(self, method, path, params=None, payload=None):
+ return self._proxy(self._get_api_url(Settings.PROMETHEUS_API_HOST),
+ method, path, params, payload)
+
+ def alert_proxy(self, method, path, params=None, payload=None):
+ return self._proxy(self._get_api_url(Settings.ALERTMANAGER_API_HOST),
+ method, path, params, payload)
+
+ def _get_api_url(self, host):
+ return host.rstrip('/') + '/api/v1'
+
+ def _proxy(self, base_url, method, path, params=None, payload=None):
+ try:
+ response = requests.request(method, base_url + path, params=params, json=payload)
+ except Exception:
+ raise DashboardException('Could not reach external API', http_status_code=404,
+ component='prometheus')
+ content = json.loads(response.content)
+ if content['status'] == 'success':
+ if 'data' in content:
+ return content['data']
+ return content
+ raise DashboardException(content, http_status_code=400, component='prometheus')
+
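+    # Usage sketch (illustrative): self.prometheus_proxy('GET', '/rules')
+    # issues GET <PROMETHEUS_API_HOST>/api/v1/rules and returns the 'data'
+    # member of a successful response.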
+
+@ApiController('/prometheus', Scope.PROMETHEUS)
+class Prometheus(PrometheusRESTController):
+ def list(self, **params):
+ return self.alert_proxy('GET', '/alerts', params)
+
+ @RESTController.Collection(method='GET')
+ def rules(self, **params):
+ return self.prometheus_proxy('GET', '/rules', params)
+
+ @RESTController.Collection(method='GET', path='/silences')
+ def get_silences(self, **params):
+ return self.alert_proxy('GET', '/silences', params)
+
+ @RESTController.Collection(method='POST', path='/silence', status=201)
+ def create_silence(self, **params):
+ return self.alert_proxy('POST', '/silences', payload=params)
+
+ @RESTController.Collection(method='DELETE', path='/silence/{s_id}', status=204)
+ def delete_silence(self, s_id):
+ return self.alert_proxy('DELETE', '/silence/' + s_id) if s_id else None
+
+
+@ApiController('/prometheus/notifications', Scope.PROMETHEUS)
+class PrometheusNotifications(RESTController):
+
+ def list(self, **params):
+ if 'from' in params:
+ f = params['from']
+ if f == 'last':
+ return PrometheusReceiver.notifications[-1:]
+ return PrometheusReceiver.notifications[int(f) + 1:]
+ return PrometheusReceiver.notifications
diff --git a/src/pybind/mgr/dashboard/controllers/rbd.py b/src/pybind/mgr/dashboard/controllers/rbd.py
new file mode 100644
index 00000000..52dca087
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/rbd.py
@@ -0,0 +1,526 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=unused-argument
+# pylint: disable=too-many-statements,too-many-branches
+from __future__ import absolute_import
+
+import math
+from functools import partial
+from datetime import datetime
+
+import cherrypy
+
+import rbd
+
+from . import ApiController, RESTController, Task, UpdatePermission, \
+ DeletePermission, CreatePermission, ReadPermission, allow_empty_body
+from .. import mgr, logger
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..services.rbd import RbdConfiguration, format_bitmask, format_features
+from ..tools import ViewCache, str_to_bool
+from ..services.exception import handle_rados_error, handle_rbd_error, \
+ serialize_dashboard_exception
+
+
+# pylint: disable=not-callable
+def RbdTask(name, metadata, wait_for):
+ def composed_decorator(func):
+ func = handle_rados_error('pool')(func)
+ func = handle_rbd_error()(func)
+ return Task("rbd/{}".format(name), metadata, wait_for,
+ partial(serialize_dashboard_exception, include_http_status=True))(func)
+ return composed_decorator
+
+
+def _rbd_call(pool_name, func, *args, **kwargs):
+ with mgr.rados.open_ioctx(pool_name) as ioctx:
+ func(ioctx, *args, **kwargs)
+
+
+def _rbd_image_call(pool_name, image_name, func, *args, **kwargs):
+ def _ioctx_func(ioctx, image_name, func, *args, **kwargs):
+ with rbd.Image(ioctx, image_name) as img:
+ func(ioctx, img, *args, **kwargs)
+
+ return _rbd_call(pool_name, _ioctx_func, image_name, func, *args, **kwargs)
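+# Usage sketch (illustrative): _rbd_image_call('rbd', 'img1',
+# lambda ioctx, img: img.resize(2 ** 30)) opens the pool and the image and
+# invokes the callback with both handles.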
+
+
+def _sort_features(features, enable=True):
+ """
+ Sorts image features according to feature dependencies:
+
+ object-map depends on exclusive-lock
+ journaling depends on exclusive-lock
+ fast-diff depends on object-map
+ """
+ ORDER = ['exclusive-lock', 'journaling', 'object-map', 'fast-diff']
+
+ def key_func(feat):
+ try:
+ return ORDER.index(feat)
+ except ValueError:
+ return id(feat)
+
+ features.sort(key=key_func, reverse=not enable)
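+    # Illustrative example: _sort_features(['fast-diff', 'exclusive-lock',
+    # 'object-map']) reorders the list in place to ['exclusive-lock',
+    # 'object-map', 'fast-diff']; with enable=False the order is reversed
+    # so that dependent features are disabled first.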
+
+
+@ApiController('/block/image', Scope.RBD_IMAGE)
+class Rbd(RESTController):
+
+ RESOURCE_ID = "pool_name/image_name"
+
+    # set of image features that can be enabled on existing images
+ ALLOW_ENABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "journaling"}
+
+ # set of image features that can be disabled on existing images
+ ALLOW_DISABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "deep-flatten",
+ "journaling"}
+
+ @classmethod
+ def _rbd_disk_usage(cls, image, snaps, whole_object=True):
+ class DUCallback(object):
+ def __init__(self):
+ self.used_size = 0
+
+ def __call__(self, offset, length, exists):
+ if exists:
+ self.used_size += length
+
+ snap_map = {}
+ prev_snap = None
+ total_used_size = 0
+ for _, size, name in snaps:
+ image.set_snap(name)
+ du_callb = DUCallback()
+ image.diff_iterate(0, size, prev_snap, du_callb,
+ whole_object=whole_object)
+ snap_map[name] = du_callb.used_size
+ total_used_size += du_callb.used_size
+ prev_snap = name
+
+ return total_used_size, snap_map
+
+ @classmethod
+ def _rbd_image(cls, ioctx, pool_name, image_name):
+ with rbd.Image(ioctx, image_name) as img:
+ stat = img.stat()
+ stat['name'] = image_name
+ if img.old_format():
+ stat['unique_id'] = '{}/{}'.format(pool_name, stat['block_name_prefix'])
+ stat['id'] = stat['unique_id']
+ stat['image_format'] = 1
+ else:
+ stat['unique_id'] = '{}/{}'.format(pool_name, img.id())
+ stat['id'] = img.id()
+ stat['image_format'] = 2
+
+ stat['pool_name'] = pool_name
+ features = img.features()
+ stat['features'] = features
+ stat['features_name'] = format_bitmask(features)
+
+ # the following keys are deprecated
+ del stat['parent_pool']
+ del stat['parent_name']
+
+ stat['timestamp'] = "{}Z".format(img.create_timestamp()
+ .isoformat())
+
+ stat['stripe_count'] = img.stripe_count()
+ stat['stripe_unit'] = img.stripe_unit()
+
+ data_pool_name = CephService.get_pool_name_from_id(
+ img.data_pool_id())
+ if data_pool_name == pool_name:
+ data_pool_name = None
+ stat['data_pool'] = data_pool_name
+
+ try:
+ parent_info = img.parent_info()
+ stat['parent'] = {
+ 'pool_name': parent_info[0],
+ 'image_name': parent_info[1],
+ 'snap_name': parent_info[2]
+ }
+ except rbd.ImageNotFound:
+ # no parent image
+ stat['parent'] = None
+
+ # snapshots
+ stat['snapshots'] = []
+ for snap in img.list_snaps():
+ snap['timestamp'] = "{}Z".format(
+ img.get_snap_timestamp(snap['id']).isoformat())
+ snap['is_protected'] = img.is_protected_snap(snap['name'])
+ snap['used_bytes'] = None
+ snap['children'] = []
+ img.set_snap(snap['name'])
+ for child_pool_name, child_image_name in img.list_children():
+ snap['children'].append({
+ 'pool_name': child_pool_name,
+ 'image_name': child_image_name
+ })
+ stat['snapshots'].append(snap)
+
+ # disk usage
+ img_flags = img.flags()
+ if 'fast-diff' in stat['features_name'] and \
+ not rbd.RBD_FLAG_FAST_DIFF_INVALID & img_flags:
+ snaps = [(s['id'], s['size'], s['name'])
+ for s in stat['snapshots']]
+ snaps.sort(key=lambda s: s[0])
+ snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)]
+ total_prov_bytes, snaps_prov_bytes = cls._rbd_disk_usage(
+ img, snaps, True)
+ stat['total_disk_usage'] = total_prov_bytes
+ for snap, prov_bytes in snaps_prov_bytes.items():
+ if snap is None:
+ stat['disk_usage'] = prov_bytes
+ continue
+ for ss in stat['snapshots']:
+ if ss['name'] == snap:
+ ss['disk_usage'] = prov_bytes
+ break
+ else:
+ stat['total_disk_usage'] = None
+ stat['disk_usage'] = None
+
+ stat['configuration'] = RbdConfiguration(pool_ioctx=ioctx, image_name=image_name).list()
+
+ return stat
+
+ @classmethod
+ @ViewCache()
+ def _rbd_pool_list(cls, pool_name):
+ rbd_inst = rbd.RBD()
+ with mgr.rados.open_ioctx(pool_name) as ioctx:
+ names = rbd_inst.list(ioctx)
+ result = []
+ for name in names:
+ try:
+ stat = cls._rbd_image(ioctx, pool_name, name)
+ except rbd.ImageNotFound:
+ # may have been removed in the meanwhile
+ continue
+ result.append(stat)
+ return result
+
+ def _rbd_list(self, pool_name=None):
+ if pool_name:
+ pools = [pool_name]
+ else:
+ pools = [p['pool_name'] for p in CephService.get_pool_list('rbd')]
+
+ result = []
+ for pool in pools:
+ # pylint: disable=unbalanced-tuple-unpacking
+ status, value = self._rbd_pool_list(pool)
+ for i, image in enumerate(value):
+ value[i]['configuration'] = RbdConfiguration(pool, image['name']).list()
+ result.append({'status': status, 'value': value, 'pool_name': pool})
+ return result
+
+ @handle_rbd_error()
+ @handle_rados_error('pool')
+ def list(self, pool_name=None):
+ return self._rbd_list(pool_name)
+
+ @handle_rbd_error()
+ @handle_rados_error('pool')
+ def get(self, pool_name, image_name):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ try:
+ return self._rbd_image(ioctx, pool_name, image_name)
+ except rbd.ImageNotFound:
+ raise cherrypy.HTTPError(404)
+
+ @RbdTask('create',
+ {'pool_name': '{pool_name}', 'image_name': '{name}'}, 2.0)
+ def create(self, name, pool_name, size, obj_size=None, features=None,
+ stripe_unit=None, stripe_count=None, data_pool=None, configuration=None):
+
+ size = int(size)
+
+ def _create(ioctx):
+ rbd_inst = rbd.RBD()
+
+ # Set order
+ l_order = None
+ if obj_size and obj_size > 0:
+ l_order = int(round(math.log(float(obj_size), 2)))
+
+ # Set features
+ feature_bitmask = format_features(features)
+
+ rbd_inst.create(ioctx, name, size, order=l_order, old_format=False,
+ features=feature_bitmask, stripe_unit=stripe_unit,
+ stripe_count=stripe_count, data_pool=data_pool)
+ RbdConfiguration(pool_ioctx=ioctx, image_name=name).set_configuration(configuration)
+
+ _rbd_call(pool_name, _create)
+
+ @RbdTask('delete', ['{pool_name}', '{image_name}'], 2.0)
+ def delete(self, pool_name, image_name):
+ rbd_inst = rbd.RBD()
+ return _rbd_call(pool_name, rbd_inst.remove, image_name)
+
+ @RbdTask('edit', ['{pool_name}', '{image_name}', '{name}'], 4.0)
+ def set(self, pool_name, image_name, name=None, size=None, features=None, configuration=None):
+ def _edit(ioctx, image):
+ rbd_inst = rbd.RBD()
+ # check rename image
+ if name and name != image_name:
+ rbd_inst.rename(ioctx, image_name, name)
+
+ # check resize
+ if size and size != image.size():
+ image.resize(size)
+
+ # check enable/disable features
+ if features is not None:
+ curr_features = format_bitmask(image.features())
+ # check disabled features
+ _sort_features(curr_features, enable=False)
+ for feature in curr_features:
+ if feature not in features and feature in self.ALLOW_DISABLE_FEATURES:
+ if feature not in format_bitmask(image.features()):
+ continue
+ f_bitmask = format_features([feature])
+ image.update_features(f_bitmask, False)
+ # check enabled features
+ _sort_features(features)
+ for feature in features:
+ if feature not in curr_features and feature in self.ALLOW_ENABLE_FEATURES:
+ if feature in format_bitmask(image.features()):
+ continue
+ f_bitmask = format_features([feature])
+ image.update_features(f_bitmask, True)
+
+ RbdConfiguration(pool_ioctx=ioctx, image_name=image_name).set_configuration(
+ configuration)
+
+ return _rbd_image_call(pool_name, image_name, _edit)
+
+ @RbdTask('copy',
+ {'src_pool_name': '{pool_name}',
+ 'src_image_name': '{image_name}',
+ 'dest_pool_name': '{dest_pool_name}',
+ 'dest_image_name': '{dest_image_name}'}, 2.0)
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def copy(self, pool_name, image_name, dest_pool_name, dest_image_name,
+ snapshot_name=None, obj_size=None, features=None, stripe_unit=None,
+ stripe_count=None, data_pool=None, configuration=None):
+
+ def _src_copy(s_ioctx, s_img):
+ def _copy(d_ioctx):
+ # Set order
+ l_order = None
+ if obj_size and obj_size > 0:
+ l_order = int(round(math.log(float(obj_size), 2)))
+
+ # Set features
+ feature_bitmask = format_features(features)
+
+ if snapshot_name:
+ s_img.set_snap(snapshot_name)
+
+ s_img.copy(d_ioctx, dest_image_name, feature_bitmask, l_order,
+ stripe_unit, stripe_count, data_pool)
+ RbdConfiguration(pool_ioctx=d_ioctx, image_name=dest_image_name).set_configuration(
+ configuration)
+
+ return _rbd_call(dest_pool_name, _copy)
+
+ return _rbd_image_call(pool_name, image_name, _src_copy)
+
+ @RbdTask('flatten', ['{pool_name}', '{image_name}'], 2.0)
+ @RESTController.Resource('POST')
+ @UpdatePermission
+ @allow_empty_body
+ def flatten(self, pool_name, image_name):
+
+ def _flatten(ioctx, image):
+ image.flatten()
+
+ return _rbd_image_call(pool_name, image_name, _flatten)
+
+ @RESTController.Collection('GET')
+ def default_features(self):
+ rbd_default_features = mgr.get('config')['rbd_default_features']
+ return format_bitmask(int(rbd_default_features))
+
+ @RbdTask('trash/move', ['{pool_name}', '{image_name}'], 2.0)
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def move_trash(self, pool_name, image_name, delay=0):
+ """Move an image to the trash.
+        Images, even ones actively in use by clones,
+ can be moved to the trash and deleted at a later time.
+ """
+ rbd_inst = rbd.RBD()
+ return _rbd_call(pool_name, rbd_inst.trash_move, image_name, delay)
+
+ @RESTController.Resource()
+ @ReadPermission
+ def configuration(self, pool_name, image_name):
+ return RbdConfiguration(pool_name, image_name).list()
+
+
+@ApiController('/block/image/{pool_name}/{image_name}/snap', Scope.RBD_IMAGE)
+class RbdSnapshot(RESTController):
+
+ RESOURCE_ID = "snapshot_name"
+
+ @RbdTask('snap/create',
+ ['{pool_name}', '{image_name}', '{snapshot_name}'], 2.0)
+ def create(self, pool_name, image_name, snapshot_name):
+ def _create_snapshot(ioctx, img, snapshot_name):
+ img.create_snap(snapshot_name)
+
+ return _rbd_image_call(pool_name, image_name, _create_snapshot,
+ snapshot_name)
+
+ @RbdTask('snap/delete',
+ ['{pool_name}', '{image_name}', '{snapshot_name}'], 2.0)
+ def delete(self, pool_name, image_name, snapshot_name):
+ def _remove_snapshot(ioctx, img, snapshot_name):
+ img.remove_snap(snapshot_name)
+
+ return _rbd_image_call(pool_name, image_name, _remove_snapshot,
+ snapshot_name)
+
+ @RbdTask('snap/edit',
+ ['{pool_name}', '{image_name}', '{snapshot_name}'], 4.0)
+ def set(self, pool_name, image_name, snapshot_name, new_snap_name=None,
+ is_protected=None):
+ def _edit(ioctx, img, snapshot_name):
+ if new_snap_name and new_snap_name != snapshot_name:
+ img.rename_snap(snapshot_name, new_snap_name)
+ snapshot_name = new_snap_name
+ if is_protected is not None and \
+ is_protected != img.is_protected_snap(snapshot_name):
+ if is_protected:
+ img.protect_snap(snapshot_name)
+ else:
+ img.unprotect_snap(snapshot_name)
+
+ return _rbd_image_call(pool_name, image_name, _edit, snapshot_name)
+
+ @RbdTask('snap/rollback',
+ ['{pool_name}', '{image_name}', '{snapshot_name}'], 5.0)
+ @RESTController.Resource('POST')
+ @UpdatePermission
+ @allow_empty_body
+ def rollback(self, pool_name, image_name, snapshot_name):
+ def _rollback(ioctx, img, snapshot_name):
+ img.rollback_to_snap(snapshot_name)
+ return _rbd_image_call(pool_name, image_name, _rollback, snapshot_name)
+
+ @RbdTask('clone',
+ {'parent_pool_name': '{pool_name}',
+ 'parent_image_name': '{image_name}',
+ 'parent_snap_name': '{snapshot_name}',
+ 'child_pool_name': '{child_pool_name}',
+ 'child_image_name': '{child_image_name}'}, 2.0)
+ @RESTController.Resource('POST')
+ @allow_empty_body
+ def clone(self, pool_name, image_name, snapshot_name, child_pool_name,
+ child_image_name, obj_size=None, features=None, stripe_unit=None, stripe_count=None,
+ data_pool=None, configuration=None):
+ """
+ Clones a snapshot to an image
+ """
+
+ def _parent_clone(p_ioctx):
+ def _clone(ioctx):
+ # Set order
+ l_order = None
+ if obj_size and obj_size > 0:
+ l_order = int(round(math.log(float(obj_size), 2)))
+
+ # Set features
+ feature_bitmask = format_features(features)
+
+ rbd_inst = rbd.RBD()
+ rbd_inst.clone(p_ioctx, image_name, snapshot_name, ioctx,
+ child_image_name, feature_bitmask, l_order,
+ stripe_unit, stripe_count, data_pool)
+
+ RbdConfiguration(pool_ioctx=ioctx, image_name=child_image_name).set_configuration(
+ configuration)
+
+ return _rbd_call(child_pool_name, _clone)
+
+ _rbd_call(pool_name, _parent_clone)
+
+
+@ApiController('/block/image/trash', Scope.RBD_IMAGE)
+class RbdTrash(RESTController):
+ RESOURCE_ID = "pool_name/image_id"
+ rbd_inst = rbd.RBD()
+
+ @ViewCache()
+ def _trash_pool_list(self, pool_name):
+ with mgr.rados.open_ioctx(pool_name) as ioctx:
+ images = self.rbd_inst.trash_list(ioctx)
+ result = []
+ for trash in images:
+ trash['pool_name'] = pool_name
+ trash['deletion_time'] = "{}Z".format(trash['deletion_time'].isoformat())
+ trash['deferment_end_time'] = "{}Z".format(trash['deferment_end_time'].isoformat())
+ result.append(trash)
+ return result
+
+ def _trash_list(self, pool_name=None):
+ if pool_name:
+ pools = [pool_name]
+ else:
+ pools = [p['pool_name'] for p in CephService.get_pool_list('rbd')]
+
+ result = []
+ for pool in pools:
+ # pylint: disable=unbalanced-tuple-unpacking
+ status, value = self._trash_pool_list(pool)
+ result.append({'status': status, 'value': value, 'pool_name': pool})
+ return result
+
+ @handle_rbd_error()
+ @handle_rados_error('pool')
+ def list(self, pool_name=None):
+ """List all entries from trash."""
+ return self._trash_list(pool_name)
+
+ @handle_rbd_error()
+ @handle_rados_error('pool')
+ @RbdTask('trash/purge', ['{pool_name}'], 2.0)
+ @RESTController.Collection('POST', query_params=['pool_name'])
+ @DeletePermission
+ @allow_empty_body
+ def purge(self, pool_name=None):
+ """Remove all expired images from trash."""
+ now = "{}Z".format(datetime.utcnow().isoformat())
+ pools = self._trash_list(pool_name)
+
+ for pool in pools:
+ for image in pool['value']:
+ if image['deferment_end_time'] < now:
+ logger.info('Removing trash image %s (pool=%s, name=%s)',
+ image['id'], pool['pool_name'], image['name'])
+ _rbd_call(pool['pool_name'], self.rbd_inst.trash_remove, image['id'], 0)
+
+ @RbdTask('trash/restore', ['{pool_name}', '{image_id}', '{new_image_name}'], 2.0)
+ @RESTController.Resource('POST')
+ @CreatePermission
+ @allow_empty_body
+ def restore(self, pool_name, image_id, new_image_name):
+ """Restore an image from trash."""
+ return _rbd_call(pool_name, self.rbd_inst.trash_restore, image_id, new_image_name)
+
+ @RbdTask('trash/remove', ['{pool_name}', '{image_id}', '{image_name}'], 2.0)
+ def delete(self, pool_name, image_id, image_name, force=False):
+ """Delete an image from trash.
+ If image deferment time has not expired you can not removed it unless use force.
+ But an actively in-use by clones or has snapshots can not be removed.
+ """
+ return _rbd_call(pool_name, self.rbd_inst.trash_remove, image_id, int(str_to_bool(force)))
diff --git a/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py b/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
new file mode 100644
index 00000000..0f6574a4
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
@@ -0,0 +1,460 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+import re
+
+from functools import partial
+
+import cherrypy
+
+import rbd
+
+from . import ApiController, Endpoint, Task, BaseController, ReadPermission, \
+ RESTController
+from .. import logger, mgr
+from ..security import Scope
+from ..services.ceph_service import CephService
+from ..tools import ViewCache
+from ..services.exception import handle_rados_error, handle_rbd_error, \
+ serialize_dashboard_exception
+
+
+# pylint: disable=not-callable
+def handle_rbd_mirror_error():
+ def composed_decorator(func):
+ func = handle_rados_error('rbd-mirroring')(func)
+ return handle_rbd_error()(func)
+ return composed_decorator
+
+
+# pylint: disable=not-callable
+def RbdMirroringTask(name, metadata, wait_for):
+ def composed_decorator(func):
+ func = handle_rbd_mirror_error()(func)
+ return Task("rbd/mirroring/{}".format(name), metadata, wait_for,
+ partial(serialize_dashboard_exception, include_http_status=True))(func)
+ return composed_decorator
+
+
+def _rbd_call(pool_name, func, *args, **kwargs):
+ with mgr.rados.open_ioctx(pool_name) as ioctx:
+ func(ioctx, *args, **kwargs)
+
+
+@ViewCache()
+def get_daemons_and_pools(): # pylint: disable=R0915
+ def get_daemons():
+ daemons = []
+ for hostname, server in CephService.get_service_map('rbd-mirror').items():
+ for service in server['services']:
+ id = service['id'] # pylint: disable=W0622
+ metadata = service['metadata']
+ status = service['status'] or {}
+
+ try:
+ status = json.loads(status['json'])
+ except (ValueError, KeyError) as _:
+ status = {}
+
+ instance_id = metadata['instance_id']
+ if id == instance_id:
+ # new version that supports per-cluster leader elections
+ id = metadata['id']
+
+ # extract per-daemon service data and health
+ daemon = {
+ 'id': id,
+ 'instance_id': instance_id,
+ 'version': metadata['ceph_version'],
+ 'server_hostname': hostname,
+ 'service': service,
+ 'server': server,
+ 'metadata': metadata,
+ 'status': status
+ }
+ daemon = dict(daemon, **get_daemon_health(daemon))
+ daemons.append(daemon)
+
+ return sorted(daemons, key=lambda k: k['instance_id'])
+
+ def get_daemon_health(daemon):
+ health = {
+ 'health_color': 'info',
+ 'health': 'Unknown'
+ }
+ for _, pool_data in daemon['status'].items():
+            if (health['health_color'] != 'error'
+ and [k for k, v in pool_data.get('callouts', {}).items()
+ if v['level'] == 'error']):
+ health = {
+ 'health_color': 'error',
+ 'health': 'Error'
+ }
+            elif (health['health_color'] != 'error'
+ and [k for k, v in pool_data.get('callouts', {}).items()
+ if v['level'] == 'warning']):
+ health = {
+ 'health_color': 'warning',
+ 'health': 'Warning'
+ }
+ elif health['health_color'] == 'info':
+ health = {
+ 'health_color': 'success',
+ 'health': 'OK'
+ }
+ return health
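+        # Illustrative example: a single 'error' callout in any pool marks
+        # the daemon as Error; 'warning' callouts alone yield Warning;
+        # otherwise the initial 'info' placeholder is promoted to OK.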
+
+ def get_pools(daemons): # pylint: disable=R0912, R0915
+ pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
+ if pool.get('type', 1) == 1]
+ pool_stats = {}
+ rbdctx = rbd.RBD()
+ for pool_name in pool_names:
+ logger.debug("Constructing IOCtx %s", pool_name)
+ try:
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ except TypeError:
+ logger.exception("Failed to open pool %s", pool_name)
+ continue
+
+ try:
+ mirror_mode = rbdctx.mirror_mode_get(ioctx)
+ peer_uuids = [x['uuid'] for x in rbdctx.mirror_peer_list(ioctx)]
+ except: # noqa pylint: disable=W0702
+ logger.exception("Failed to query mirror settings %s", pool_name)
+ mirror_mode = None
+ peer_uuids = []
+
+ stats = {}
+ if mirror_mode == rbd.RBD_MIRROR_MODE_DISABLED:
+ mirror_mode = "disabled"
+ stats['health_color'] = "info"
+ stats['health'] = "Disabled"
+ elif mirror_mode == rbd.RBD_MIRROR_MODE_IMAGE:
+ mirror_mode = "image"
+ elif mirror_mode == rbd.RBD_MIRROR_MODE_POOL:
+ mirror_mode = "pool"
+ else:
+ mirror_mode = "unknown"
+ stats['health_color'] = "warning"
+ stats['health'] = "Warning"
+
+ pool_stats[pool_name] = dict(stats, **{
+ 'mirror_mode': mirror_mode,
+ 'peer_uuids': peer_uuids
+ })
+
+ for daemon in daemons:
+ for _, pool_data in daemon['status'].items():
+ stats = pool_stats.get(pool_data['name'], None)
+ if stats is None:
+ continue
+
+ if pool_data.get('leader', False):
+ # leader instance stores image counts
+ stats['leader_id'] = daemon['metadata']['instance_id']
+ stats['image_local_count'] = pool_data.get('image_local_count', 0)
+ stats['image_remote_count'] = pool_data.get('image_remote_count', 0)
+
+ if (stats.get('health_color', '') != 'error'
+ and pool_data.get('image_error_count', 0) > 0):
+ stats['health_color'] = 'error'
+ stats['health'] = 'Error'
+ elif (stats.get('health_color', '') != 'error'
+ and pool_data.get('image_warning_count', 0) > 0):
+ stats['health_color'] = 'warning'
+ stats['health'] = 'Warning'
+ elif stats.get('health', None) is None:
+ stats['health_color'] = 'success'
+ stats['health'] = 'OK'
+
+ for _, stats in pool_stats.items():
+ if stats['mirror_mode'] == 'disabled':
+ continue
+ if stats.get('health', None) is None:
+ # daemon doesn't know about pool
+ stats['health_color'] = 'error'
+ stats['health'] = 'Error'
+ elif stats.get('leader_id', None) is None:
+ # no daemons are managing the pool as leader instance
+ stats['health_color'] = 'warning'
+ stats['health'] = 'Warning'
+ return pool_stats
+
+ daemons = get_daemons()
+ return {
+ 'daemons': daemons,
+ 'pools': get_pools(daemons)
+ }
+
+
+@ViewCache()
+def _get_pool_datum(pool_name):
+ data = {}
+ logger.debug("Constructing IOCtx %s", pool_name)
+ try:
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ except TypeError:
+ logger.exception("Failed to open pool %s", pool_name)
+ return None
+
+ mirror_state = {
+ 'down': {
+ 'health': 'issue',
+ 'state_color': 'warning',
+ 'state': 'Unknown',
+ 'description': None
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_UNKNOWN: {
+ 'health': 'issue',
+ 'state_color': 'warning',
+ 'state': 'Unknown'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_ERROR: {
+ 'health': 'issue',
+ 'state_color': 'error',
+ 'state': 'Error'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_SYNCING: {
+ 'health': 'syncing'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY: {
+ 'health': 'ok',
+ 'state_color': 'success',
+ 'state': 'Starting'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING: {
+ 'health': 'ok',
+ 'state_color': 'success',
+ 'state': 'Replaying'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY: {
+ 'health': 'ok',
+ 'state_color': 'success',
+ 'state': 'Stopping'
+ },
+ rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED: {
+ 'health': 'ok',
+ 'state_color': 'info',
+ 'state': 'Primary'
+ }
+ }
+
+ rbdctx = rbd.RBD()
+ try:
+ mirror_image_status = rbdctx.mirror_image_status_list(ioctx)
+ data['mirror_images'] = sorted([
+ dict({
+ 'name': image['name'],
+ 'description': image['description']
+ }, **mirror_state['down' if not image['up'] else image['state']])
+ for image in mirror_image_status
+ ], key=lambda k: k['name'])
+ except rbd.ImageNotFound:
+ pass
+ except: # noqa pylint: disable=W0702
+ logger.exception("Failed to list mirror image status %s", pool_name)
+ raise
+
+ return data
+
+
+@ViewCache()
+def _get_content_data(): # pylint: disable=R0914
+ pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
+ if pool.get('type', 1) == 1]
+ _, data = get_daemons_and_pools()
+ daemons = data.get('daemons', [])
+ pool_stats = data.get('pools', {})
+
+ pools = []
+ image_error = []
+ image_syncing = []
+ image_ready = []
+ for pool_name in pool_names:
+ _, pool = _get_pool_datum(pool_name)
+ if not pool:
+ pool = {}
+
+ stats = pool_stats.get(pool_name, {})
+ if stats.get('mirror_mode', None) is None:
+ continue
+
+ mirror_images = pool.get('mirror_images', [])
+ for mirror_image in mirror_images:
+ image = {
+ 'pool_name': pool_name,
+ 'name': mirror_image['name']
+ }
+
+ if mirror_image['health'] == 'ok':
+ image.update({
+ 'state_color': mirror_image['state_color'],
+ 'state': mirror_image['state'],
+ 'description': mirror_image['description']
+ })
+ image_ready.append(image)
+ elif mirror_image['health'] == 'syncing':
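+                # Extract the sync progress from the daemon-supplied status
+                # description; a typical (hypothetical) value looks like
+                # "bootstrapping, IMAGE_COPY/COPY_OBJECT 42%".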
+ p = re.compile("bootstrapping, IMAGE_COPY/COPY_OBJECT (.*)%")
+ image.update({
+ 'progress': (p.findall(mirror_image['description']) or [0])[0]
+ })
+ image_syncing.append(image)
+ else:
+ image.update({
+ 'state_color': mirror_image['state_color'],
+ 'state': mirror_image['state'],
+ 'description': mirror_image['description']
+ })
+ image_error.append(image)
+
+ pools.append(dict({
+ 'name': pool_name
+ }, **stats))
+
+ return {
+ 'daemons': daemons,
+ 'pools': pools,
+ 'image_error': image_error,
+ 'image_syncing': image_syncing,
+ 'image_ready': image_ready
+ }
+
+
+def _reset_view_cache():
+ get_daemons_and_pools.reset()
+ _get_pool_datum.reset()
+ _get_content_data.reset()
+
+
+@ApiController('/block/mirroring/summary', Scope.RBD_MIRRORING)
+class RbdMirroringSummary(BaseController):
+
+ @Endpoint()
+ @handle_rbd_mirror_error()
+ @ReadPermission
+ def __call__(self):
+ status, content_data = _get_content_data()
+ return {'status': status, 'content_data': content_data}
+
+
+@ApiController('/block/mirroring/pool', Scope.RBD_MIRRORING)
+class RbdMirroringPoolMode(RESTController):
+
+ RESOURCE_ID = "pool_name"
+ MIRROR_MODES = {
+ rbd.RBD_MIRROR_MODE_DISABLED: 'disabled',
+ rbd.RBD_MIRROR_MODE_IMAGE: 'image',
+ rbd.RBD_MIRROR_MODE_POOL: 'pool'
+ }
+
+ @handle_rbd_mirror_error()
+ def get(self, pool_name):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ mode = rbd.RBD().mirror_mode_get(ioctx)
+ data = {
+ 'mirror_mode': self.MIRROR_MODES.get(mode, 'unknown')
+ }
+ return data
+
+ @RbdMirroringTask('pool/edit', {'pool_name': '{pool_name}'}, 5.0)
+ def set(self, pool_name, mirror_mode=None):
+ def _edit(ioctx, mirror_mode=None):
+ if mirror_mode:
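+                # Invert MIRROR_MODES so that e.g. 'image' maps back to
+                # rbd.RBD_MIRROR_MODE_IMAGE.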
+ mode_enum = {x[1]: x[0] for x in
+ self.MIRROR_MODES.items()}.get(mirror_mode, None)
+ if mode_enum is None:
+ raise rbd.Error('invalid mirror mode "{}"'.format(mirror_mode))
+
+ current_mode_enum = rbd.RBD().mirror_mode_get(ioctx)
+ if mode_enum != current_mode_enum:
+ rbd.RBD().mirror_mode_set(ioctx, mode_enum)
+ _reset_view_cache()
+
+ return _rbd_call(pool_name, _edit, mirror_mode)
+
+
+@ApiController('/block/mirroring/pool/{pool_name}/peer', Scope.RBD_MIRRORING)
+class RbdMirroringPoolPeer(RESTController):
+
+ RESOURCE_ID = "peer_uuid"
+
+ @handle_rbd_mirror_error()
+ def list(self, pool_name):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ peer_list = rbd.RBD().mirror_peer_list(ioctx)
+ return [x['uuid'] for x in peer_list]
+
+ @handle_rbd_mirror_error()
+ def create(self, pool_name, cluster_name, client_id, mon_host=None,
+ key=None):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ mode = rbd.RBD().mirror_mode_get(ioctx)
+ if mode == rbd.RBD_MIRROR_MODE_DISABLED:
+ raise rbd.Error('mirroring must be enabled')
+
+ uuid = rbd.RBD().mirror_peer_add(ioctx, cluster_name,
+ 'client.{}'.format(client_id))
+
+ attributes = {}
+ if mon_host is not None:
+ attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
+ if key is not None:
+ attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
+ if attributes:
+ rbd.RBD().mirror_peer_set_attributes(ioctx, uuid, attributes)
+
+ _reset_view_cache()
+ return {'uuid': uuid}
+
+ @handle_rbd_mirror_error()
+ def get(self, pool_name, peer_uuid):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ peer_list = rbd.RBD().mirror_peer_list(ioctx)
+ peer = next((x for x in peer_list if x['uuid'] == peer_uuid), None)
+ if not peer:
+ raise cherrypy.HTTPError(404)
+
+ # convert full client name to just the client id
+ peer['client_id'] = peer['client_name'].split('.', 1)[-1]
+ del peer['client_name']
+
+ try:
+ attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
+ except rbd.ImageNotFound:
+ attributes = {}
+
+ peer['mon_host'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, '')
+ peer['key'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, '')
+ return peer
+
+ @handle_rbd_mirror_error()
+ def delete(self, pool_name, peer_uuid):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ rbd.RBD().mirror_peer_remove(ioctx, peer_uuid)
+ _reset_view_cache()
+
+ @handle_rbd_mirror_error()
+ def set(self, pool_name, peer_uuid, cluster_name=None, client_id=None,
+ mon_host=None, key=None):
+ ioctx = mgr.rados.open_ioctx(pool_name)
+ if cluster_name:
+ rbd.RBD().mirror_peer_set_cluster(ioctx, peer_uuid, cluster_name)
+ if client_id:
+ rbd.RBD().mirror_peer_set_client(ioctx, peer_uuid,
+ 'client.{}'.format(client_id))
+
+ if mon_host is not None or key is not None:
+ try:
+ attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
+ except rbd.ImageNotFound:
+ attributes = {}
+
+ if mon_host is not None:
+ attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
+ if key is not None:
+ attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
+ rbd.RBD().mirror_peer_set_attributes(ioctx, peer_uuid, attributes)
+
+ _reset_view_cache()
diff --git a/src/pybind/mgr/dashboard/controllers/rgw.py b/src/pybind/mgr/dashboard/controllers/rgw.py
new file mode 100644
index 00000000..085155aa
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/rgw.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+
+import cherrypy
+
+from . import ApiController, BaseController, RESTController, Endpoint, \
+ ReadPermission, allow_empty_body
+from .. import logger
+from ..exceptions import DashboardException
+from ..rest_client import RequestException
+from ..security import Scope, Permission
+from ..services.auth import AuthManager, JwtManager
+from ..services.ceph_service import CephService
+from ..services.rgw_client import RgwClient
+
+
+@ApiController('/rgw', Scope.RGW)
+class Rgw(BaseController):
+
+ @Endpoint()
+ @ReadPermission
+ def status(self):
+ status = {'available': False, 'message': None}
+ try:
+ if not CephService.get_service_list('rgw'):
+ raise LookupError('No RGW service is running.')
+ instance = RgwClient.admin_instance()
+ # Check if the service is online.
+ try:
+ is_online = instance.is_service_online()
+ except RequestException as e:
+ # Drop this instance because the RGW client seems not to
+ # exist anymore (maybe removed via orchestrator). Removing
+ # the instance from the cache will result in the correct
+                # error message the next time the backend tries to
+ # establish a new connection (-> 'No RGW found' instead
+ # of 'RGW REST API failed request ...').
+ # Note, this only applies to auto-detected RGW clients.
+ RgwClient.drop_instance(instance.userid)
+ raise e
+ if not is_online:
+ msg = 'Failed to connect to the Object Gateway\'s Admin Ops API.'
+ raise RequestException(msg)
+ # Ensure the API user ID is known by the RGW.
+ if not instance.user_exists():
+ msg = 'The user "{}" is unknown to the Object Gateway.'.format(
+ instance.userid)
+ raise RequestException(msg)
+ # Ensure the system flag is set for the API user ID.
+ if not instance.is_system_user():
+ msg = 'The system flag is not set for user "{}".'.format(
+ instance.userid)
+ raise RequestException(msg)
+ status['available'] = True
+ except (RequestException, LookupError) as ex:
+ status['message'] = str(ex)
+ return status
+
+
+@ApiController('/rgw/daemon', Scope.RGW)
+class RgwDaemon(RESTController):
+
+ def list(self):
+ daemons = []
+ for hostname, server in CephService.get_service_map('rgw').items():
+ for service in server['services']:
+ metadata = service['metadata']
+
+ # extract per-daemon service data and health
+ daemon = {
+ 'id': service['id'],
+ 'version': metadata['ceph_version'],
+ 'server_hostname': hostname
+ }
+
+ daemons.append(daemon)
+
+ return sorted(daemons, key=lambda k: k['id'])
+
+ def get(self, svc_id):
+ daemon = {
+ 'rgw_metadata': [],
+ 'rgw_id': svc_id,
+ 'rgw_status': []
+ }
+ service = CephService.get_service('rgw', svc_id)
+ if not service:
+ raise cherrypy.NotFound('Service rgw {} is not available'.format(svc_id))
+
+ metadata = service['metadata']
+ status = service['status']
+ if 'json' in status:
+ try:
+ status = json.loads(status['json'])
+ except ValueError:
+ logger.warning('%s had invalid status json', service['id'])
+ status = {}
+ else:
+ logger.warning('%s has no key "json" in status', service['id'])
+
+ daemon['rgw_metadata'] = metadata
+ daemon['rgw_status'] = status
+ return daemon
+
+
+class RgwRESTController(RESTController):
+
+ def proxy(self, method, path, params=None, json_response=True):
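+        # Thin wrapper around the Admin Ops API of the configured RGW
+        # admin instance. A hypothetical call such as
+        # proxy('GET', 'user', {'uid': 'admin'}) returns the decoded
+        # JSON body unless json_response is False.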
+ try:
+ instance = RgwClient.admin_instance()
+ result = instance.proxy(method, path, params, None)
+ if json_response and result != '':
+ result = json.loads(result.decode('utf-8'))
+ return result
+ except (DashboardException, RequestException) as e:
+ raise DashboardException(e, http_status_code=500, component='rgw')
+
+
+@ApiController('/rgw/bucket', Scope.RGW)
+class RgwBucket(RgwRESTController):
+
+ def _append_bid(self, bucket):
+ """
+ Append the bucket identifier that looks like [<tenant>/]<bucket>.
+ See http://docs.ceph.com/docs/nautilus/radosgw/multitenancy/ for
+ more information.
+ :param bucket: The bucket parameters.
+ :type bucket: dict
+ :return: The modified bucket parameters including the 'bid' parameter.
+ :rtype: dict
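+        Example (hypothetical values): {'tenant': 'tnt', 'bucket': 'b1'}
+        gains 'bid': 'tnt/b1', while an empty tenant yields 'bid': 'b1'.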
+ """
+ if isinstance(bucket, dict):
+ bucket['bid'] = '{}/{}'.format(bucket['tenant'], bucket['bucket']) \
+ if bucket['tenant'] else bucket['bucket']
+ return bucket
+
+ @staticmethod
+ def strip_tenant_from_bucket_name(bucket_name, uid):
+        # type: (str, str) -> str
+ """
+        When linking a bucket to a new user that belongs to the same tenant
+        as the previous owner, the tenant prefix must be stripped from the
+        bucket name.
+ >>> RgwBucket.strip_tenant_from_bucket_name('tenant/bucket-name', 'tenant$user1')
+ 'bucket-name'
+ >>> RgwBucket.strip_tenant_from_bucket_name('tenant/bucket-name', 'tenant2$user2')
+ 'tenant/bucket-name'
+ >>> RgwBucket.strip_tenant_from_bucket_name('bucket-name', 'user1')
+ 'bucket-name'
+ """
+ bucket_tenant = bucket_name[:bucket_name.find('/')] if bucket_name.find('/') >= 0 else None
+ uid_tenant = uid[:uid.find('$')] if uid.find('$') >= 0 else None
+ if bucket_tenant and uid_tenant and bucket_tenant == uid_tenant:
+ return bucket_name[bucket_name.find('/') + 1:]
+
+ return bucket_name
+
+ def list(self, stats=False):
+ query_params = '?stats' if stats else ''
+ result = self.proxy('GET', 'bucket{}'.format(query_params))
+
+ if stats:
+ result = [self._append_bid(bucket) for bucket in result]
+
+ return result
+
+ def get(self, bucket):
+ result = self.proxy('GET', 'bucket', {'bucket': bucket})
+ return self._append_bid(result)
+
+ @allow_empty_body
+ def create(self, bucket, uid):
+ try:
+ rgw_client = RgwClient.instance(uid)
+ return rgw_client.create_bucket(bucket)
+ except RequestException as e:
+ raise DashboardException(e, http_status_code=500, component='rgw')
+
+ @allow_empty_body
+ def set(self, bucket, bucket_id, uid):
+ result = self.proxy('PUT', 'bucket', {
+ 'bucket': RgwBucket.strip_tenant_from_bucket_name(bucket, uid),
+ 'bucket-id': bucket_id,
+ 'uid': uid
+ }, json_response=False)
+ return self._append_bid(result)
+
+ def delete(self, bucket, purge_objects='true'):
+ return self.proxy('DELETE', 'bucket', {
+ 'bucket': bucket,
+ 'purge-objects': purge_objects
+ }, json_response=False)
+
+
+@ApiController('/rgw/user', Scope.RGW)
+class RgwUser(RgwRESTController):
+
+ def _append_uid(self, user):
+ """
+ Append the user identifier that looks like [<tenant>$]<user>.
+ See http://docs.ceph.com/docs/jewel/radosgw/multitenancy/ for
+ more information.
+ :param user: The user parameters.
+ :type user: dict
+ :return: The modified user parameters including the 'uid' parameter.
+ :rtype: dict
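+        Example (hypothetical values): {'tenant': 'tnt', 'user_id': 'u1'}
+        gains 'uid': 'tnt$u1', while an empty tenant yields 'uid': 'u1'.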
+ """
+ if isinstance(user, dict):
+ user['uid'] = '{}${}'.format(user['tenant'], user['user_id']) \
+ if user['tenant'] else user['user_id']
+ return user
+
+ @staticmethod
+ def _keys_allowed():
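+        # Secret keys are only exposed to dashboard users that hold the
+        # read permission plus at least one write permission (create,
+        # update or delete) on the RGW scope.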
+ permissions = AuthManager.get_user(JwtManager.get_username()).permissions_dict()
+ edit_permissions = [Permission.CREATE, Permission.UPDATE, Permission.DELETE]
+ return Scope.RGW in permissions and Permission.READ in permissions[Scope.RGW] \
+ and len(set(edit_permissions).intersection(set(permissions[Scope.RGW]))) > 0
+
+ def list(self):
+ users = []
+ marker = None
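+        # The Admin Ops user listing is paginated: keep following the
+        # returned 'marker' until 'truncated' is no longer set.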
+ while True:
+ params = {}
+ if marker:
+ params['marker'] = marker
+ result = self.proxy('GET', 'user?list', params)
+ users.extend(result['keys'])
+ if not result['truncated']:
+ break
+ # Make sure there is a marker.
+ assert result['marker']
+ # Make sure the marker has changed.
+ assert marker != result['marker']
+ marker = result['marker']
+ return users
+
+ def get(self, uid):
+ result = self.proxy('GET', 'user', {'uid': uid})
+ if not self._keys_allowed():
+ del result['keys']
+ del result['swift_keys']
+ return self._append_uid(result)
+
+ @Endpoint()
+ @ReadPermission
+ def get_emails(self):
+ emails = []
+        for uid in self.list():
+            user = self.get(uid)
+ if user["email"]:
+ emails.append(user["email"])
+ return emails
+
+ @allow_empty_body
+ def create(self, uid, display_name, email=None, max_buckets=None,
+ suspended=None, generate_key=None, access_key=None,
+ secret_key=None):
+ params = {'uid': uid}
+ if display_name is not None:
+ params['display-name'] = display_name
+ if email is not None:
+ params['email'] = email
+ if max_buckets is not None:
+ params['max-buckets'] = max_buckets
+ if suspended is not None:
+ params['suspended'] = suspended
+ if generate_key is not None:
+ params['generate-key'] = generate_key
+ if access_key is not None:
+ params['access-key'] = access_key
+ if secret_key is not None:
+ params['secret-key'] = secret_key
+ result = self.proxy('PUT', 'user', params)
+ return self._append_uid(result)
+
+ @allow_empty_body
+ def set(self, uid, display_name=None, email=None, max_buckets=None,
+ suspended=None):
+ params = {'uid': uid}
+ if display_name is not None:
+ params['display-name'] = display_name
+ if email is not None:
+ params['email'] = email
+ if max_buckets is not None:
+ params['max-buckets'] = max_buckets
+ if suspended is not None:
+ params['suspended'] = suspended
+ result = self.proxy('POST', 'user', params)
+ return self._append_uid(result)
+
+ def delete(self, uid):
+ try:
+ instance = RgwClient.admin_instance()
+            # Prevent deletion of the account the dashboard itself uses
+            # to access the RGW Object Gateway.
+ if instance.userid == uid:
+ raise DashboardException(msg='Unable to delete "{}" - this user '
+ 'account is required for managing the '
+ 'Object Gateway'.format(uid))
+ # Finally redirect request to the RGW proxy.
+ return self.proxy('DELETE', 'user', {'uid': uid}, json_response=False)
+ except (DashboardException, RequestException) as e:
+ raise DashboardException(e, component='rgw')
+
+ # pylint: disable=redefined-builtin
+ @RESTController.Resource(method='POST', path='/capability', status=201)
+ @allow_empty_body
+ def create_cap(self, uid, type, perm):
+ return self.proxy('PUT', 'user?caps', {
+ 'uid': uid,
+ 'user-caps': '{}={}'.format(type, perm)
+ })
+
+ # pylint: disable=redefined-builtin
+ @RESTController.Resource(method='DELETE', path='/capability', status=204)
+ def delete_cap(self, uid, type, perm):
+ return self.proxy('DELETE', 'user?caps', {
+ 'uid': uid,
+ 'user-caps': '{}={}'.format(type, perm)
+ })
+
+ @RESTController.Resource(method='POST', path='/key', status=201)
+ @allow_empty_body
+ def create_key(self, uid, key_type='s3', subuser=None, generate_key='true',
+ access_key=None, secret_key=None):
+ params = {'uid': uid, 'key-type': key_type, 'generate-key': generate_key}
+ if subuser is not None:
+ params['subuser'] = subuser
+ if access_key is not None:
+ params['access-key'] = access_key
+ if secret_key is not None:
+ params['secret-key'] = secret_key
+ return self.proxy('PUT', 'user?key', params)
+
+ @RESTController.Resource(method='DELETE', path='/key', status=204)
+ def delete_key(self, uid, key_type='s3', subuser=None, access_key=None):
+ params = {'uid': uid, 'key-type': key_type}
+ if subuser is not None:
+ params['subuser'] = subuser
+ if access_key is not None:
+ params['access-key'] = access_key
+ return self.proxy('DELETE', 'user?key', params, json_response=False)
+
+ @RESTController.Resource(method='GET', path='/quota')
+ def get_quota(self, uid):
+ return self.proxy('GET', 'user?quota', {'uid': uid})
+
+ @RESTController.Resource(method='PUT', path='/quota')
+ @allow_empty_body
+ def set_quota(self, uid, quota_type, enabled, max_size_kb, max_objects):
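+        # quota_type is passed straight through to the Admin Ops API,
+        # which distinguishes 'user' and 'bucket' quotas.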
+ return self.proxy('PUT', 'user?quota', {
+ 'uid': uid,
+ 'quota-type': quota_type,
+ 'enabled': enabled,
+ 'max-size-kb': max_size_kb,
+ 'max-objects': max_objects
+ }, json_response=False)
+
+ @RESTController.Resource(method='POST', path='/subuser', status=201)
+ @allow_empty_body
+ def create_subuser(self, uid, subuser, access, key_type='s3',
+ generate_secret='true', access_key=None,
+ secret_key=None):
+ return self.proxy('PUT', 'user', {
+ 'uid': uid,
+ 'subuser': subuser,
+ 'key-type': key_type,
+ 'access': access,
+ 'generate-secret': generate_secret,
+ 'access-key': access_key,
+ 'secret-key': secret_key
+ })
+
+ @RESTController.Resource(method='DELETE', path='/subuser/{subuser}', status=204)
+ def delete_subuser(self, uid, subuser, purge_keys='true'):
+ """
+        :param purge_keys: Set to 'false' to skip purging the keys.
+                           Note that this only works for S3 subusers.
+ """
+ return self.proxy('DELETE', 'user', {
+ 'uid': uid,
+ 'subuser': subuser,
+ 'purge-keys': purge_keys
+ }, json_response=False)
diff --git a/src/pybind/mgr/dashboard/controllers/role.py b/src/pybind/mgr/dashboard/controllers/role.py
new file mode 100644
index 00000000..f87eff7b
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/role.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import cherrypy
+
+from . import ApiController, RESTController, UiApiController
+from .. import mgr
+from ..exceptions import RoleDoesNotExist, DashboardException,\
+ RoleIsAssociatedWithUser, RoleAlreadyExists
+from ..security import Scope as SecurityScope, Permission
+from ..services.access_control import SYSTEM_ROLES
+
+
+@ApiController('/role', SecurityScope.USER)
+class Role(RESTController):
+ @staticmethod
+ def _role_to_dict(role):
+ role_dict = role.to_dict()
+ role_dict['system'] = role_dict['name'] in SYSTEM_ROLES
+ return role_dict
+
+ @staticmethod
+ def _validate_permissions(scopes_permissions):
+ if scopes_permissions:
+ for scope, permissions in scopes_permissions.items():
+ if scope not in SecurityScope.all_scopes():
+ raise DashboardException(msg='Invalid scope',
+ code='invalid_scope',
+ component='role')
+ if any(permission not in Permission.all_permissions()
+ for permission in permissions):
+ raise DashboardException(msg='Invalid permission',
+ code='invalid_permission',
+ component='role')
+
+ @staticmethod
+ def _set_permissions(role, scopes_permissions):
+ role.reset_scope_permissions()
+ if scopes_permissions:
+ for scope, permissions in scopes_permissions.items():
+ if permissions:
+ role.set_scope_permissions(scope, permissions)
+
+ def list(self):
+ roles = dict(mgr.ACCESS_CTRL_DB.roles)
+ roles.update(SYSTEM_ROLES)
+ roles = sorted(roles.values(), key=lambda role: role.name)
+ return [Role._role_to_dict(r) for r in roles]
+
+ def get(self, name):
+ role = SYSTEM_ROLES.get(name)
+ if not role:
+ try:
+ role = mgr.ACCESS_CTRL_DB.get_role(name)
+ except RoleDoesNotExist:
+ raise cherrypy.HTTPError(404)
+ return Role._role_to_dict(role)
+
+ def create(self, name=None, description=None, scopes_permissions=None):
+ if not name:
+ raise DashboardException(msg='Name is required',
+ code='name_required',
+ component='role')
+ Role._validate_permissions(scopes_permissions)
+ try:
+ role = mgr.ACCESS_CTRL_DB.create_role(name, description)
+ except RoleAlreadyExists:
+ raise DashboardException(msg='Role already exists',
+ code='role_already_exists',
+ component='role')
+ Role._set_permissions(role, scopes_permissions)
+ mgr.ACCESS_CTRL_DB.save()
+ return Role._role_to_dict(role)
+
+ def set(self, name, description=None, scopes_permissions=None):
+ try:
+ role = mgr.ACCESS_CTRL_DB.get_role(name)
+ except RoleDoesNotExist:
+ if name in SYSTEM_ROLES:
+ raise DashboardException(msg='Cannot update system role',
+ code='cannot_update_system_role',
+ component='role')
+ raise cherrypy.HTTPError(404)
+ Role._validate_permissions(scopes_permissions)
+ Role._set_permissions(role, scopes_permissions)
+ role.description = description
+ mgr.ACCESS_CTRL_DB.update_users_with_roles(role)
+ mgr.ACCESS_CTRL_DB.save()
+ return Role._role_to_dict(role)
+
+ def delete(self, name):
+ try:
+ mgr.ACCESS_CTRL_DB.delete_role(name)
+ except RoleDoesNotExist:
+ if name in SYSTEM_ROLES:
+ raise DashboardException(msg='Cannot delete system role',
+ code='cannot_delete_system_role',
+ component='role')
+ raise cherrypy.HTTPError(404)
+ except RoleIsAssociatedWithUser:
+ raise DashboardException(msg='Role is associated with user',
+ code='role_is_associated_with_user',
+ component='role')
+ mgr.ACCESS_CTRL_DB.save()
+
+
+@UiApiController('/scope', SecurityScope.USER)
+class Scope(RESTController):
+ def list(self):
+ return SecurityScope.all_scopes()
diff --git a/src/pybind/mgr/dashboard/controllers/saml2.py b/src/pybind/mgr/dashboard/controllers/saml2.py
new file mode 100644
index 00000000..f02f81fe
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/saml2.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import sys
+import cherrypy
+
+try:
+ from onelogin.saml2.auth import OneLogin_Saml2_Auth
+ from onelogin.saml2.errors import OneLogin_Saml2_Error
+ from onelogin.saml2.settings import OneLogin_Saml2_Settings
+
+ python_saml_imported = True
+except ImportError:
+ python_saml_imported = False
+
+from .. import mgr
+from ..exceptions import UserDoesNotExist
+from ..services.auth import JwtManager
+from ..tools import prepare_url_prefix
+from . import BaseController, Controller, Endpoint, allow_empty_body, set_cookies
+
+
+@Controller('/auth/saml2', secure=False)
+class Saml2(BaseController):
+
+ @staticmethod
+ def _build_req(request, post_data):
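+        # Translate the CherryPy request into the dict layout that
+        # OneLogin_Saml2_Auth expects; note that 'https' must be the
+        # literal string 'on' or 'off'.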
+ return {
+ 'https': 'on' if request.scheme == 'https' else 'off',
+ 'http_host': request.host,
+ 'script_name': request.path_info,
+ 'server_port': str(request.port),
+ 'get_data': {},
+ 'post_data': post_data
+ }
+
+ @staticmethod
+ def _check_python_saml():
+ if not python_saml_imported:
+ python_saml_name = 'python3-saml' if sys.version_info >= (3, 0) else 'python-saml'
+ raise cherrypy.HTTPError(400,
+ 'Required library not found: `{}`'.format(python_saml_name))
+ try:
+ OneLogin_Saml2_Settings(mgr.SSO_DB.saml2.onelogin_settings)
+ except OneLogin_Saml2_Error:
+ raise cherrypy.HTTPError(400, 'Single Sign-On is not configured.')
+
+ @Endpoint('POST', path="")
+ @allow_empty_body
+ def auth_response(self, **kwargs):
+ Saml2._check_python_saml()
+ req = Saml2._build_req(self._request, kwargs)
+ auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
+ auth.process_response()
+ errors = auth.get_errors()
+
+ if auth.is_authenticated():
+ JwtManager.reset_user()
+ username_attribute = auth.get_attribute(mgr.SSO_DB.saml2.get_username_attribute())
+ if username_attribute is None:
+ raise cherrypy.HTTPError(400,
+ 'SSO error - `{}` not found in auth attributes. '
+ 'Received attributes: {}'
+ .format(
+ mgr.SSO_DB.saml2.get_username_attribute(),
+ auth.get_attributes()))
+ username = username_attribute[0]
+ url_prefix = prepare_url_prefix(mgr.get_module_option('url_prefix', default=''))
+ try:
+ mgr.ACCESS_CTRL_DB.get_user(username)
+ except UserDoesNotExist:
+ raise cherrypy.HTTPRedirect("{}/#/sso/404".format(url_prefix))
+
+ token = JwtManager.gen_token(username)
+ JwtManager.set_user(JwtManager.decode_token(token))
+ token = token.decode('utf-8')
+ set_cookies(url_prefix, token)
+ raise cherrypy.HTTPRedirect("{}/#/login?access_token={}".format(url_prefix, token))
+ else:
+ return {
+ 'is_authenticated': auth.is_authenticated(),
+ 'errors': errors,
+ 'reason': auth.get_last_error_reason()
+ }
+
+ @Endpoint(xml=True)
+ def metadata(self):
+ Saml2._check_python_saml()
+ saml_settings = OneLogin_Saml2_Settings(mgr.SSO_DB.saml2.onelogin_settings)
+ return saml_settings.get_sp_metadata()
+
+ @Endpoint(json_response=False)
+ def login(self):
+ Saml2._check_python_saml()
+ req = Saml2._build_req(self._request, {})
+ auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
+ raise cherrypy.HTTPRedirect(auth.login())
+
+ @Endpoint(json_response=False)
+ def slo(self):
+ Saml2._check_python_saml()
+ req = Saml2._build_req(self._request, {})
+ auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
+ raise cherrypy.HTTPRedirect(auth.logout())
+
+ @Endpoint(json_response=False)
+ def logout(self, **kwargs):
+ # pylint: disable=unused-argument
+ Saml2._check_python_saml()
+ JwtManager.reset_user()
+ cherrypy.response.cookie['token'] = {'expires': 0, 'max-age': 0}
+ url_prefix = prepare_url_prefix(mgr.get_module_option('url_prefix', default=''))
+ raise cherrypy.HTTPRedirect("{}/#/login".format(url_prefix))
diff --git a/src/pybind/mgr/dashboard/controllers/settings.py b/src/pybind/mgr/dashboard/controllers/settings.py
new file mode 100644
index 00000000..a484580e
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/settings.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from contextlib import contextmanager
+
+import cherrypy
+
+from . import ApiController, RESTController
+from ..settings import Settings as SettingsModule, Options
+from ..security import Scope
+
+
+@ApiController('/settings', Scope.CONFIG_OPT)
+class Settings(RESTController):
+ """
+    Manages the settings of the dashboard (not those of the Ceph cluster).
+ """
+ @contextmanager
+ def _attribute_handler(self, name):
+ """
+ :type name: str|dict[str, str]
+ :rtype: str|dict[str, str]
+ """
+ if isinstance(name, dict):
+ result = {self._to_native(key): value
+ for key, value in name.items()}
+ else:
+ result = self._to_native(name)
+
+ try:
+ yield result
+ except AttributeError:
+ raise cherrypy.NotFound(result)
+
+ @staticmethod
+ def _to_native(setting):
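+        # e.g. 'grafana-api-url' -> 'GRAFANA_API_URL'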
+ return setting.upper().replace('-', '_')
+
+ def list(self):
+ return [
+ self._get(name) for name in Options.__dict__
+ if name.isupper() and not name.startswith('_')
+ ]
+
+ def _get(self, name):
+ with self._attribute_handler(name) as sname:
+ default, data_type = getattr(Options, sname)
+ return {
+ 'name': sname,
+ 'default': default,
+ 'type': data_type.__name__,
+ 'value': getattr(SettingsModule, sname)
+ }
+
+ def get(self, name):
+ return self._get(name)
+
+ def set(self, name, value):
+ with self._attribute_handler(name) as sname:
+ setattr(SettingsModule, self._to_native(sname), value)
+
+ def delete(self, name):
+ with self._attribute_handler(name) as sname:
+ delattr(SettingsModule, self._to_native(sname))
+
+ def bulk_set(self, **kwargs):
+ with self._attribute_handler(kwargs) as data:
+ for name, value in data.items():
+ setattr(SettingsModule, self._to_native(name), value)
diff --git a/src/pybind/mgr/dashboard/controllers/summary.py b/src/pybind/mgr/dashboard/controllers/summary.py
new file mode 100644
index 00000000..09d69ecf
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/summary.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+
+from . import ApiController, Endpoint, BaseController
+from .. import mgr
+from ..security import Permission, Scope
+from ..controllers.rbd_mirroring import get_daemons_and_pools
+from ..exceptions import ViewCacheNoDataException
+from ..tools import TaskManager
+
+
+@ApiController('/summary')
+class Summary(BaseController):
+ def _health_status(self):
+ health_data = mgr.get("health")
+ return json.loads(health_data["json"])['status']
+
+ def _rbd_mirroring(self):
+ try:
+ _, data = get_daemons_and_pools()
+ except ViewCacheNoDataException:
+ return {}
+
+ daemons = data.get('daemons', [])
+ pools = data.get('pools', {})
+
+ warnings = 0
+ errors = 0
+ for daemon in daemons:
+ if daemon['health_color'] == 'error':
+ errors += 1
+ elif daemon['health_color'] == 'warning':
+ warnings += 1
+ for _, pool in pools.items():
+ if pool['health_color'] == 'error':
+ errors += 1
+ elif pool['health_color'] == 'warning':
+ warnings += 1
+ return {'warnings': warnings, 'errors': errors}
+
+ def _task_permissions(self, name):
+ result = True
+ if name == 'pool/create':
+ result = self._has_permissions(Permission.CREATE, Scope.POOL)
+ elif name == 'pool/edit':
+ result = self._has_permissions(Permission.UPDATE, Scope.POOL)
+ elif name == 'pool/delete':
+ result = self._has_permissions(Permission.DELETE, Scope.POOL)
+ elif name in [
+ 'rbd/create', 'rbd/copy', 'rbd/snap/create',
+ 'rbd/clone', 'rbd/trash/restore']:
+ result = self._has_permissions(Permission.CREATE, Scope.RBD_IMAGE)
+ elif name in [
+ 'rbd/edit', 'rbd/snap/edit', 'rbd/flatten',
+ 'rbd/snap/rollback']:
+ result = self._has_permissions(Permission.UPDATE, Scope.RBD_IMAGE)
+ elif name in [
+ 'rbd/delete', 'rbd/snap/delete', 'rbd/trash/move',
+ 'rbd/trash/remove', 'rbd/trash/purge']:
+ result = self._has_permissions(Permission.DELETE, Scope.RBD_IMAGE)
+ return result
+
+ def _get_host(self):
+ # type: () -> str
+ services = mgr.get('mgr_map')['services']
+ return services['dashboard'] if 'dashboard' in services else ''
+
+ @Endpoint()
+ def __call__(self):
+ exe_t, fin_t = TaskManager.list_serializable()
+ executing_tasks = [task for task in exe_t if self._task_permissions(task['name'])]
+ finished_tasks = [task for task in fin_t if self._task_permissions(task['name'])]
+
+ result = {
+ 'health_status': self._health_status(),
+ 'mgr_id': mgr.get_mgr_id(),
+ 'mgr_host': self._get_host(),
+ 'have_mon_connection': mgr.have_mon_connection(),
+ 'executing_tasks': executing_tasks,
+ 'finished_tasks': finished_tasks,
+ 'version': mgr.version
+ }
+ if self._has_permissions(Permission.READ, Scope.RBD_MIRRORING):
+ result['rbd_mirroring'] = self._rbd_mirroring()
+ return result
diff --git a/src/pybind/mgr/dashboard/controllers/task.py b/src/pybind/mgr/dashboard/controllers/task.py
new file mode 100644
index 00000000..9380f070
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/task.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import ApiController, RESTController
+from ..tools import TaskManager
+
+
+@ApiController('/task')
+class Task(RESTController):
+ def list(self, name=None):
+ executing_t, finished_t = TaskManager.list_serializable(name)
+ return {
+ 'executing_tasks': executing_t,
+ 'finished_tasks': finished_t
+ }
diff --git a/src/pybind/mgr/dashboard/controllers/user.py b/src/pybind/mgr/dashboard/controllers/user.py
new file mode 100644
index 00000000..d99dead3
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/user.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import cherrypy
+
+from . import ApiController, RESTController
+from .. import mgr
+from ..exceptions import DashboardException, UserAlreadyExists, \
+ UserDoesNotExist
+from ..security import Scope
+from ..services.access_control import SYSTEM_ROLES
+from ..services.auth import JwtManager
+
+
+@ApiController('/user', Scope.USER)
+class User(RESTController):
+ @staticmethod
+ def _user_to_dict(user):
+ result = user.to_dict()
+ del result['password']
+ return result
+
+ @staticmethod
+ def _get_user_roles(roles):
+ all_roles = dict(mgr.ACCESS_CTRL_DB.roles)
+ all_roles.update(SYSTEM_ROLES)
+ try:
+ return [all_roles[rolename] for rolename in roles]
+ except KeyError:
+ raise DashboardException(msg='Role does not exist',
+ code='role_does_not_exist',
+ component='user')
+
+ def list(self):
+ users = mgr.ACCESS_CTRL_DB.users
+ result = [User._user_to_dict(u) for _, u in users.items()]
+ return result
+
+ def get(self, username):
+ try:
+ user = mgr.ACCESS_CTRL_DB.get_user(username)
+ except UserDoesNotExist:
+ raise cherrypy.HTTPError(404)
+ return User._user_to_dict(user)
+
+ def create(self, username=None, password=None, name=None, email=None, roles=None):
+ if not username:
+ raise DashboardException(msg='Username is required',
+ code='username_required',
+ component='user')
+ user_roles = None
+ if roles:
+ user_roles = User._get_user_roles(roles)
+ try:
+ user = mgr.ACCESS_CTRL_DB.create_user(username, password, name, email)
+ except UserAlreadyExists:
+ raise DashboardException(msg='Username already exists',
+ code='username_already_exists',
+ component='user')
+ if user_roles:
+ user.set_roles(user_roles)
+ mgr.ACCESS_CTRL_DB.save()
+ return User._user_to_dict(user)
+
+ def delete(self, username):
+ session_username = JwtManager.get_username()
+ if session_username == username:
+ raise DashboardException(msg='Cannot delete current user',
+ code='cannot_delete_current_user',
+ component='user')
+ try:
+ mgr.ACCESS_CTRL_DB.delete_user(username)
+ except UserDoesNotExist:
+ raise cherrypy.HTTPError(404)
+ mgr.ACCESS_CTRL_DB.save()
+
+ def set(self, username, password=None, name=None, email=None, roles=None):
+ try:
+ user = mgr.ACCESS_CTRL_DB.get_user(username)
+ except UserDoesNotExist:
+ raise cherrypy.HTTPError(404)
+ user_roles = []
+ if roles:
+ user_roles = User._get_user_roles(roles)
+ if password:
+ user.set_password(password)
+ user.name = name
+ user.email = email
+ user.set_roles(user_roles)
+ mgr.ACCESS_CTRL_DB.save()
+ return User._user_to_dict(user)