author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28  /qa/tasks/mgr
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/tasks/mgr')
-rw-r--r--  qa/tasks/mgr/__init__.py | 0
-rw-r--r--  qa/tasks/mgr/dashboard/__init__.py | 1
-rw-r--r--  qa/tasks/mgr/dashboard/helper.py | 724
-rw-r--r--  qa/tasks/mgr/dashboard/test_api.py | 20
-rw-r--r--  qa/tasks/mgr/dashboard/test_auth.py | 352
-rw-r--r--  qa/tasks/mgr/dashboard/test_cephfs.py | 292
-rw-r--r--  qa/tasks/mgr/dashboard/test_cluster.py | 23
-rw-r--r--  qa/tasks/mgr/dashboard/test_cluster_configuration.py | 398
-rw-r--r--  qa/tasks/mgr/dashboard/test_crush_rule.py | 84
-rw-r--r--  qa/tasks/mgr/dashboard/test_erasure_code_profile.py | 105
-rw-r--r--  qa/tasks/mgr/dashboard/test_feedback.py | 36
-rw-r--r--  qa/tasks/mgr/dashboard/test_health.py | 309
-rw-r--r--  qa/tasks/mgr/dashboard/test_host.py | 158
-rw-r--r--  qa/tasks/mgr/dashboard/test_logs.py | 34
-rw-r--r--  qa/tasks/mgr/dashboard/test_mgr_module.py | 154
-rw-r--r--  qa/tasks/mgr/dashboard/test_monitor.py | 24
-rw-r--r--  qa/tasks/mgr/dashboard/test_motd.py | 37
-rw-r--r--  qa/tasks/mgr/dashboard/test_orchestrator.py | 27
-rw-r--r--  qa/tasks/mgr/dashboard/test_osd.py | 368
-rw-r--r--  qa/tasks/mgr/dashboard/test_perf_counters.py | 71
-rw-r--r--  qa/tasks/mgr/dashboard/test_pool.py | 435
-rw-r--r--  qa/tasks/mgr/dashboard/test_rbd.py | 978
-rw-r--r--  qa/tasks/mgr/dashboard/test_rbd_mirroring.py | 195
-rw-r--r--  qa/tasks/mgr/dashboard/test_requests.py | 32
-rw-r--r--  qa/tasks/mgr/dashboard/test_rgw.py | 868
-rw-r--r--  qa/tasks/mgr/dashboard/test_role.py | 145
-rw-r--r--  qa/tasks/mgr/dashboard/test_settings.py | 65
-rw-r--r--  qa/tasks/mgr/dashboard/test_summary.py | 39
-rw-r--r--  qa/tasks/mgr/dashboard/test_telemetry.py | 98
-rw-r--r--  qa/tasks/mgr/dashboard/test_user.py | 565
-rw-r--r--  qa/tasks/mgr/mgr_test_case.py | 228
-rw-r--r--  qa/tasks/mgr/test_cache.py | 83
-rw-r--r--  qa/tasks/mgr/test_crash.py | 108
-rw-r--r--  qa/tasks/mgr/test_dashboard.py | 177
-rw-r--r--  qa/tasks/mgr/test_failover.py | 182
-rw-r--r--  qa/tasks/mgr/test_insights.py | 192
-rw-r--r--  qa/tasks/mgr/test_module_selftest.py | 254
-rw-r--r--  qa/tasks/mgr/test_orchestrator_cli.py | 250
-rw-r--r--  qa/tasks/mgr/test_progress.py | 423
-rw-r--r--  qa/tasks/mgr/test_prometheus.py | 79
40 files changed, 8613 insertions, 0 deletions
diff --git a/qa/tasks/mgr/__init__.py b/qa/tasks/mgr/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/tasks/mgr/__init__.py
diff --git a/qa/tasks/mgr/dashboard/__init__.py b/qa/tasks/mgr/dashboard/__init__.py
new file mode 100644
index 000000000..2b022e024
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/__init__.py
@@ -0,0 +1 @@
+DEFAULT_API_VERSION = '1.0'
diff --git a/qa/tasks/mgr/dashboard/helper.py b/qa/tasks/mgr/dashboard/helper.py
new file mode 100644
index 000000000..d80e238a2
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/helper.py
@@ -0,0 +1,724 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=W0212,too-many-return-statements,too-many-public-methods
+from __future__ import absolute_import
+
+import json
+import logging
+import random
+import re
+import string
+import time
+from collections import namedtuple
+from typing import List
+
+import requests
+from tasks.mgr.mgr_test_case import MgrTestCase
+from teuthology.exceptions import \
+ CommandFailedError # pylint: disable=import-error
+
+from . import DEFAULT_API_VERSION
+
+log = logging.getLogger(__name__)
+
+
+class DashboardTestCase(MgrTestCase):
+ # Display full error diffs
+ maxDiff = None
+
+ # Increased x3 (20 -> 60)
+ TIMEOUT_HEALTH_CLEAR = 60
+
+ MGRS_REQUIRED = 2
+ MDSS_REQUIRED = 1
+ REQUIRE_FILESYSTEM = True
+ CLIENTS_REQUIRED = 1
+ CEPHFS = False
+ ORCHESTRATOR = False
+ ORCHESTRATOR_TEST_DATA = {
+ 'inventory': [
+ {
+ 'name': 'test-host0',
+ 'addr': '1.2.3.4',
+ 'devices': [
+ {
+ 'path': '/dev/sda',
+ }
+ ]
+ },
+ {
+ 'name': 'test-host1',
+ 'addr': '1.2.3.5',
+ 'devices': [
+ {
+ 'path': '/dev/sdb',
+ }
+ ]
+ }
+ ],
+ 'daemons': [
+ {
+ 'nodename': 'test-host0',
+ 'daemon_type': 'mon',
+ 'daemon_id': 'a'
+ },
+ {
+ 'nodename': 'test-host0',
+ 'daemon_type': 'mgr',
+ 'daemon_id': 'x'
+ },
+ {
+ 'nodename': 'test-host0',
+ 'daemon_type': 'osd',
+ 'daemon_id': '0'
+ },
+ {
+ 'nodename': 'test-host1',
+ 'daemon_type': 'osd',
+ 'daemon_id': '1'
+ }
+ ]
+ }
+
+ _session = None # type: requests.sessions.Session
+ _token = None
+ _resp = None # type: requests.models.Response
+ _loggedin = False
+ _base_uri = None
+
+ AUTO_AUTHENTICATE = True
+
+ AUTH_ROLES = ['administrator']
+
+ @classmethod
+ def create_user(cls, username, password, roles=None,
+ force_password=True, cmd_args=None):
+ # pylint: disable=too-many-arguments
+ """
+ :param username: The name of the user.
+ :type username: str
+ :param password: The password.
+ :type password: str
+ :param roles: A list of roles.
+ :type roles: list
+ :param force_password: Force the use of the specified password. This
+ will bypass the password complexity check. Defaults to 'True'.
+ :type force_password: bool
+ :param cmd_args: Additional command line arguments for the
+ 'ac-user-create' command.
+ :type cmd_args: None | list[str]
+ """
+ try:
+ cls._ceph_cmd(['dashboard', 'ac-user-show', username])
+ cls._ceph_cmd(['dashboard', 'ac-user-delete', username])
+ except CommandFailedError as ex:
+ if ex.exitstatus != 2:
+ raise ex
+
+ user_create_args = [
+ 'dashboard', 'ac-user-create', username
+ ]
+ if force_password:
+ user_create_args.append('--force-password')
+ if cmd_args:
+ user_create_args.extend(cmd_args)
+ cls._ceph_cmd_with_secret(user_create_args, password)
+ if roles:
+ set_roles_args = ['dashboard', 'ac-user-set-roles', username]
+ for idx, role in enumerate(roles):
+ if isinstance(role, str):
+ set_roles_args.append(role)
+ else:
+ assert isinstance(role, dict)
+ rolename = 'test_role_{}'.format(idx)
+ try:
+ cls._ceph_cmd(['dashboard', 'ac-role-show', rolename])
+ cls._ceph_cmd(['dashboard', 'ac-role-delete', rolename])
+ except CommandFailedError as ex:
+ if ex.exitstatus != 2:
+ raise ex
+ cls._ceph_cmd(['dashboard', 'ac-role-create', rolename])
+ for mod, perms in role.items():
+ args = ['dashboard', 'ac-role-add-scope-perms', rolename, mod]
+ args.extend(perms)
+ cls._ceph_cmd(args)
+ set_roles_args.append(rolename)
+ cls._ceph_cmd(set_roles_args)
+
+ @classmethod
+ def create_pool(cls, name, pg_num, pool_type, application='rbd'):
+ data = {
+ 'pool': name,
+ 'pg_num': pg_num,
+ 'pool_type': pool_type,
+ 'application_metadata': [application]
+ }
+ if pool_type == 'erasure':
+ data['flags'] = ['ec_overwrites']
+ cls._task_post("/api/pool", data)
+
+ @classmethod
+ def login(cls, username, password, set_cookies=False):
+ if cls._loggedin:
+ cls.logout()
+ cls._post('/api/auth', {'username': username,
+ 'password': password}, set_cookies=set_cookies)
+ cls._assertEq(cls._resp.status_code, 201)
+ cls._token = cls.jsonBody()['token']
+ cls._loggedin = True
+
+ @classmethod
+ def logout(cls, set_cookies=False):
+ if cls._loggedin:
+ cls._post('/api/auth/logout', set_cookies=set_cookies)
+ cls._assertEq(cls._resp.status_code, 200)
+ cls._token = None
+ cls._loggedin = False
+
+ @classmethod
+ def delete_user(cls, username, roles=None):
+ if roles is None:
+ roles = []
+ cls._ceph_cmd(['dashboard', 'ac-user-delete', username])
+ for idx, role in enumerate(roles):
+ if isinstance(role, dict):
+ cls._ceph_cmd(['dashboard', 'ac-role-delete', 'test_role_{}'.format(idx)])
+
+ @classmethod
+ def RunAs(cls, username, password, roles=None, force_password=True,
+ cmd_args=None, login=True):
+ # pylint: disable=too-many-arguments
+ def wrapper(func):
+ def execute(self, *args, **kwargs):
+ self.create_user(username, password, roles,
+ force_password, cmd_args)
+ if login:
+ self.login(username, password)
+ res = func(self, *args, **kwargs)
+ if login:
+ self.logout()
+ self.delete_user(username, roles)
+ return res
+
+ return execute
+
+ return wrapper
+
+ @classmethod
+ def set_jwt_token(cls, token):
+ cls._token = token
+
+ @classmethod
+ def setUpClass(cls):
+ super(DashboardTestCase, cls).setUpClass()
+ cls._assign_ports("dashboard", "ssl_server_port")
+ cls._load_module("dashboard")
+ cls.update_base_uri()
+
+ if cls.CEPHFS:
+ cls.mds_cluster.clear_firewall()
+
+ # To avoid any issues with e.g. unlink bugs, we destroy and recreate
+ # the filesystem rather than just doing a rm -rf of files
+ cls.mds_cluster.mds_stop()
+ cls.mds_cluster.mds_fail()
+ cls.mds_cluster.delete_all_filesystems()
+ cls.fs = None # is now invalid!
+
+ cls.fs = cls.mds_cluster.newfs(create=True)
+ cls.fs.mds_restart()
+
+ # In case some test messed with auth caps, reset them
+ # pylint: disable=not-an-iterable
+ client_mount_ids = [m.client_id for m in cls.mounts]
+ for client_id in client_mount_ids:
+ cls.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ 'auth', 'caps', "client.{0}".format(client_id),
+ 'mds', 'allow',
+ 'mon', 'allow r',
+ 'osd', 'allow rw pool={0}'.format(cls.fs.get_data_pool_name()))
+
+ # wait for mds restart to complete...
+ cls.fs.wait_for_daemons()
+
+ if cls.ORCHESTRATOR:
+ cls._load_module("test_orchestrator")
+
+ cmd = ['orch', 'set', 'backend', 'test_orchestrator']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
+
+ cmd = ['test_orchestrator', 'load_data', '-i', '-']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd, stdin=json.dumps(
+ cls.ORCHESTRATOR_TEST_DATA
+ ))
+
+ cls._token = None
+ cls._session = requests.Session()
+ cls._resp = None
+
+ cls.create_user('admin', 'admin', cls.AUTH_ROLES)
+ if cls.AUTO_AUTHENTICATE:
+ cls.login('admin', 'admin')
+
+ @classmethod
+ def update_base_uri(cls):
+ if cls._base_uri is None:
+ cls._base_uri = cls._get_uri("dashboard").rstrip('/')
+
+ def setUp(self):
+ super(DashboardTestCase, self).setUp()
+ if not self._loggedin and self.AUTO_AUTHENTICATE:
+ self.login('admin', 'admin')
+ self.wait_for_health_clear(self.TIMEOUT_HEALTH_CLEAR)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(DashboardTestCase, cls).tearDownClass()
+
+ # pylint: disable=inconsistent-return-statements, too-many-arguments, too-many-branches
+ @classmethod
+ def _request(cls, url, method, data=None, params=None, version=DEFAULT_API_VERSION,
+ set_cookies=False, headers=None):
+ url = "{}{}".format(cls._base_uri, url)
+ log.debug("Request %s to %s", method, url)
+ if headers is None:
+ headers = {}
+ cookies = {}
+ if cls._token:
+ if set_cookies:
+ cookies['token'] = cls._token
+ else:
+ headers['Authorization'] = "Bearer {}".format(cls._token)
+ if version is None:
+ headers['Accept'] = 'application/json'
+ else:
+ headers['Accept'] = 'application/vnd.ceph.api.v{}+json'.format(version)
+
+ if set_cookies:
+ if method == 'GET':
+ cls._resp = cls._session.get(url, params=params, verify=False,
+ headers=headers, cookies=cookies)
+ elif method == 'POST':
+ cls._resp = cls._session.post(url, json=data, params=params,
+ verify=False, headers=headers, cookies=cookies)
+ elif method == 'DELETE':
+ cls._resp = cls._session.delete(url, json=data, params=params,
+ verify=False, headers=headers, cookies=cookies)
+ elif method == 'PUT':
+ cls._resp = cls._session.put(url, json=data, params=params,
+ verify=False, headers=headers, cookies=cookies)
+ else:
+ assert False
+ else:
+ if method == 'GET':
+ cls._resp = cls._session.get(url, params=params, verify=False,
+ headers=headers)
+ elif method == 'POST':
+ cls._resp = cls._session.post(url, json=data, params=params,
+ verify=False, headers=headers)
+ elif method == 'DELETE':
+ cls._resp = cls._session.delete(url, json=data, params=params,
+ verify=False, headers=headers)
+ elif method == 'PUT':
+ cls._resp = cls._session.put(url, json=data, params=params,
+ verify=False, headers=headers)
+ else:
+ assert False
+ try:
+ if not cls._resp.ok:
+ # Output response for easier debugging.
+ log.error("Request response: %s", cls._resp.text)
+ content_type = cls._resp.headers['content-type']
+ if re.match(r'^application/.*json',
+ content_type) and cls._resp.text and cls._resp.text != "":
+ return cls._resp.json()
+ return cls._resp.text
+ except ValueError as ex:
+ log.exception("Failed to decode response: %s", cls._resp.text)
+ raise ex
+
+ @classmethod
+ def _get(cls, url, params=None, version=DEFAULT_API_VERSION, set_cookies=False, headers=None):
+ return cls._request(url, 'GET', params=params, version=version,
+ set_cookies=set_cookies, headers=headers)
+
+ @classmethod
+ def _view_cache_get(cls, url, retries=5):
+ retry = True
+ while retry and retries > 0:
+ retry = False
+ res = cls._get(url, version=DEFAULT_API_VERSION)
+ if isinstance(res, dict):
+ res = [res]
+ for view in res:
+ assert 'value' in view
+ if not view['value']:
+ retry = True
+ retries -= 1
+ if retries == 0:
+ raise Exception("{} view cache exceeded number of retries={}"
+ .format(url, retries))
+ return res
+
+ @classmethod
+ def _post(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False):
+ cls._request(url, 'POST', data, params, version=version, set_cookies=set_cookies)
+
+ @classmethod
+ def _delete(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False):
+ cls._request(url, 'DELETE', data, params, version=version, set_cookies=set_cookies)
+
+ @classmethod
+ def _put(cls, url, data=None, params=None, version=DEFAULT_API_VERSION, set_cookies=False):
+ cls._request(url, 'PUT', data, params, version=version, set_cookies=set_cookies)
+
+ @classmethod
+ def _assertEq(cls, v1, v2):
+ if not v1 == v2:
+ raise Exception("assertion failed: {} != {}".format(v1, v2))
+
+ @classmethod
+ def _assertIn(cls, v1, v2):
+ if v1 not in v2:
+ raise Exception("assertion failed: {} not in {}".format(v1, v2))
+
+ @classmethod
+ def _assertIsInst(cls, v1, v2):
+ if not isinstance(v1, v2):
+ raise Exception("assertion failed: {} not instance of {}".format(v1, v2))
+
+ # pylint: disable=too-many-arguments
+ @classmethod
+ def _task_request(cls, method, url, data, timeout, version=DEFAULT_API_VERSION,
+ set_cookies=False):
+ res = cls._request(url, method, data, version=version, set_cookies=set_cookies)
+ cls._assertIn(cls._resp.status_code, [200, 201, 202, 204, 400, 403, 404])
+
+ if cls._resp.status_code == 403:
+ return None
+
+ if cls._resp.status_code != 202:
+ log.debug("task finished immediately")
+ return res
+
+ cls._assertIn('name', res)
+ cls._assertIn('metadata', res)
+ task_name = res['name']
+ task_metadata = res['metadata']
+
+ retries = int(timeout)
+ res_task = None
+ while retries > 0 and not res_task:
+ retries -= 1
+ log.debug("task (%s, %s) is still executing", task_name, task_metadata)
+ time.sleep(1)
+ _res = cls._get('/api/task?name={}'.format(task_name), version=version)
+ cls._assertEq(cls._resp.status_code, 200)
+ executing_tasks = [task for task in _res['executing_tasks'] if
+ task['metadata'] == task_metadata]
+ finished_tasks = [task for task in _res['finished_tasks'] if
+ task['metadata'] == task_metadata]
+ if not executing_tasks and finished_tasks:
+ res_task = finished_tasks[0]
+
+ if retries <= 0:
+ raise Exception("Waiting for task ({}, {}) to finish timed out. {}"
+ .format(task_name, task_metadata, _res))
+
+ log.debug("task (%s, %s) finished", task_name, task_metadata)
+ if res_task['success']:
+ if method == 'POST':
+ cls._resp.status_code = 201
+ elif method == 'PUT':
+ cls._resp.status_code = 200
+ elif method == 'DELETE':
+ cls._resp.status_code = 204
+ return res_task['ret_value']
+
+ if 'status' in res_task['exception']:
+ cls._resp.status_code = res_task['exception']['status']
+ else:
+ cls._resp.status_code = 500
+ return res_task['exception']
+
+ @classmethod
+ def _task_post(cls, url, data=None, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False):
+ return cls._task_request('POST', url, data, timeout, version=version,
+ set_cookies=set_cookies)
+
+ @classmethod
+ def _task_delete(cls, url, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False):
+ return cls._task_request('DELETE', url, None, timeout, version=version,
+ set_cookies=set_cookies)
+
+ @classmethod
+ def _task_put(cls, url, data=None, timeout=60, version=DEFAULT_API_VERSION, set_cookies=False):
+ return cls._task_request('PUT', url, data, timeout, version=version,
+ set_cookies=set_cookies)
+
+ @classmethod
+ def cookies(cls):
+ return cls._resp.cookies
+
+ @classmethod
+ def jsonBody(cls):
+ return cls._resp.json()
+
+ @classmethod
+ def reset_session(cls):
+ cls._session = requests.Session()
+
+ def assertSubset(self, data, biggerData):
+ for key, value in data.items():
+ self.assertEqual(biggerData[key], value)
+
+ def assertJsonBody(self, data):
+ body = self._resp.json()
+ self.assertEqual(body, data)
+
+ def assertJsonSubset(self, data):
+ self.assertSubset(data, self._resp.json())
+
+ def assertSchema(self, data, schema):
+ try:
+ return _validate_json(data, schema)
+ except _ValError as e:
+ self.assertEqual(data, str(e))
+
+ def assertSchemaBody(self, schema):
+ self.assertSchema(self.jsonBody(), schema)
+
+ def assertBody(self, body):
+ self.assertEqual(self._resp.text, body)
+
+ def assertStatus(self, status):
+ if isinstance(status, list):
+ self.assertIn(self._resp.status_code, status)
+ else:
+ self.assertEqual(self._resp.status_code, status)
+
+ def assertHeaders(self, headers):
+ for name, value in headers.items():
+ self.assertIn(name, self._resp.headers)
+ self.assertEqual(self._resp.headers[name], value)
+
+ def assertError(self, code=None, component=None, detail=None):
+ body = self._resp.json()
+ if code:
+ self.assertEqual(body['code'], code)
+ if component:
+ self.assertEqual(body['component'], component)
+ if detail:
+ self.assertEqual(body['detail'], detail)
+
+ @classmethod
+ def _ceph_cmd(cls, cmd):
+ res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
+ log.debug("command result: %s", res)
+ return res
+
+ @classmethod
+ def _ceph_cmd_result(cls, cmd):
+ exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd)
+ log.debug("command exit status: %d", exitstatus)
+ return exitstatus
+
+ @classmethod
+ def _ceph_cmd_with_secret(cls, cmd: List[str], secret: str, return_exit_code: bool = False):
+ cmd.append('-i')
+ cmd.append('{}'.format(cls._ceph_create_tmp_file(secret)))
+ if return_exit_code:
+ return cls._ceph_cmd_result(cmd)
+ return cls._ceph_cmd(cmd)
+
+ @classmethod
+ def _ceph_create_tmp_file(cls, content: str) -> str:
+ """Create a temporary file in the remote cluster"""
+ file_name = ''.join(random.choices(string.ascii_letters + string.digits, k=20))
+ file_path = '/tmp/{}'.format(file_name)
+ cls._cmd(['sh', '-c', 'echo -n {} > {}'.format(content, file_path)])
+ return file_path
+
+ def set_config_key(self, key, value):
+ self._ceph_cmd(['config-key', 'set', key, value])
+
+ def get_config_key(self, key):
+ return self._ceph_cmd(['config-key', 'get', key])
+
+ @classmethod
+ def _cmd(cls, args):
+ return cls.mgr_cluster.admin_remote.run(args=args)
+
+ @classmethod
+ def _rbd_cmd(cls, cmd):
+ args = ['rbd']
+ args.extend(cmd)
+ cls._cmd(args)
+
+ @classmethod
+ def _radosgw_admin_cmd(cls, cmd):
+ args = ['radosgw-admin']
+ args.extend(cmd)
+ cls._cmd(args)
+
+ @classmethod
+ def _rados_cmd(cls, cmd):
+ args = ['rados']
+ args.extend(cmd)
+ cls._cmd(args)
+
+ @classmethod
+ def mons(cls):
+ out = cls.ceph_cluster.mon_manager.raw_cluster_cmd('quorum_status')
+ j = json.loads(out)
+ return [mon['name'] for mon in j['monmap']['mons']]
+
+ @classmethod
+ def find_object_in_list(cls, key, value, iterable):
+ """
+ Get the first occurrence of an object within a list with
+ the specified key/value.
+ :param key: The name of the key.
+ :param value: The value to search for.
+ :param iterable: The list to process.
+ :return: Returns the found object or None.
+ """
+ for obj in iterable:
+ if key in obj and obj[key] == value:
+ return obj
+ return None
+
+
+# TODO: pass defaults=(False,) to namedtuple() if python3.7
+class JLeaf(namedtuple('JLeaf', ['typ', 'none'])):
+ def __new__(cls, typ, none=False):
+ return super().__new__(cls, typ, none)
+
+
+JList = namedtuple('JList', ['elem_typ'])
+
+JTuple = namedtuple('JTuple', ['elem_typs'])
+
+JUnion = namedtuple('JUnion', ['elem_typs'])
+
+
+class JObj(namedtuple('JObj', ['sub_elems', 'allow_unknown', 'none', 'unknown_schema'])):
+ def __new__(cls, sub_elems, allow_unknown=False, none=False, unknown_schema=None):
+ """
+ :type sub_elems: dict[str, JAny | JLeaf | JList | JObj | type]
+ :type allow_unknown: bool
+ :type none: bool
+ :type unknown_schema: int, str, JAny | JLeaf | JList | JObj
+ :return:
+ """
+ return super(JObj, cls).__new__(cls, sub_elems, allow_unknown, none, unknown_schema)
+
+
+JAny = namedtuple('JAny', ['none'])
+
+module_options_object_schema = JObj({
+ 'name': str,
+ 'type': str,
+ 'level': str,
+ 'flags': int,
+ 'default_value': JAny(none=True),
+ 'min': JAny(none=False),
+ 'max': JAny(none=False),
+ 'enum_allowed': JList(str),
+ 'see_also': JList(str),
+ 'desc': str,
+ 'long_desc': str,
+ 'tags': JList(str),
+})
+
+module_options_schema = JObj(
+ {},
+ allow_unknown=True,
+ unknown_schema=module_options_object_schema)
+
+addrvec_schema = JList(JObj({
+ 'addr': str,
+ 'nonce': int,
+ 'type': str
+}))
+
+devices_schema = JList(JObj({
+ 'daemons': JList(str),
+ 'devid': str,
+ 'location': JList(JObj({
+ 'host': str,
+ 'dev': str,
+ 'path': str
+ }))
+}, allow_unknown=True))
+
+
+class _ValError(Exception):
+ def __init__(self, msg, path):
+ path_str = ''.join('[{}]'.format(repr(p)) for p in path)
+ super(_ValError, self).__init__('In `input{}`: {}'.format(path_str, msg))
+
+
+# pylint: disable=dangerous-default-value,inconsistent-return-statements,too-many-branches
+def _validate_json(val, schema, path=[]):
+ """
+ >>> d = {'a': 1, 'b': 'x', 'c': range(10)}
+ ... ds = JObj({'a': int, 'b': str, 'c': JList(int)})
+ ... _validate_json(d, ds)
+ True
+ >>> _validate_json({'num': 1}, JObj({'num': JUnion([int,float])}))
+ True
+ >>> _validate_json({'num': 'a'}, JObj({'num': JUnion([int,float])}))
+ False
+ """
+ if isinstance(schema, JAny):
+ if not schema.none and val is None:
+ raise _ValError('val is None', path)
+ return True
+ if isinstance(schema, JLeaf):
+ if schema.none and val is None:
+ return True
+ if not isinstance(val, schema.typ):
+ raise _ValError('val not of type {}'.format(schema.typ), path)
+ return True
+ if isinstance(schema, JList):
+ if not isinstance(val, list):
+ raise _ValError('val="{}" is not a list'.format(val), path)
+ return all(_validate_json(e, schema.elem_typ, path + [i]) for i, e in enumerate(val))
+ if isinstance(schema, JTuple):
+ return all(_validate_json(val[i], typ, path + [i])
+ for i, typ in enumerate(schema.elem_typs))
+ if isinstance(schema, JUnion):
+ for typ in schema.elem_typs:
+ try:
+ if _validate_json(val, typ, path):
+ return True
+ except _ValError:
+ pass
+ return False
+ if isinstance(schema, JObj):
+ if val is None and schema.none:
+ return True
+ if val is None:
+ raise _ValError('val is None', path)
+ if not hasattr(val, 'keys'):
+ raise _ValError('val="{}" is not a dict'.format(val), path)
+ missing_keys = set(schema.sub_elems.keys()).difference(set(val.keys()))
+ if missing_keys:
+ raise _ValError('missing keys: {}'.format(missing_keys), path)
+ unknown_keys = set(val.keys()).difference(set(schema.sub_elems.keys()))
+ if not schema.allow_unknown and unknown_keys:
+ raise _ValError('unknown keys: {}'.format(unknown_keys), path)
+ result = all(
+ _validate_json(val[key], sub_schema, path + [key])
+ for key, sub_schema in schema.sub_elems.items()
+ )
+ if unknown_keys and schema.allow_unknown and schema.unknown_schema:
+ result += all(
+ _validate_json(val[key], schema.unknown_schema, path + [key])
+ for key in unknown_keys
+ )
+ return result
+ if schema in [str, int, float, bool]:
+ return _validate_json(val, JLeaf(schema), path)
+
+ assert False, str(path)
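
A minimal sketch of how the helpers above fit together in a typical dashboard test (not part of this diff; the pool name, the asserted 'pool_name' field, and the delete endpoint are illustrative assumptions):

from .helper import DashboardTestCase, JLeaf, JObj


class ExamplePoolTest(DashboardTestCase):
    AUTH_ROLES = ['administrator']  # setUpClass() creates 'admin' and logs in

    def test_pool_round_trip(self):
        # create_pool() wraps _task_post('/api/pool', ...); _task_post() polls
        # /api/task for asynchronous tasks and maps success back to HTTP 201.
        self.create_pool('example_pool', 8, 'replicated')  # hypothetical pool
        self.assertStatus(201)

        data = self._get('/api/pool/example_pool')
        self.assertStatus(200)
        # assertSchema() checks the JSON body against the JObj/JLeaf schema DSL.
        self.assertSchema(data, JObj(sub_elems={'pool_name': JLeaf(str)},
                                     allow_unknown=True))

        # Assumes pool deletion is permitted (mon_allow_pool_delete=true).
        self._task_delete('/api/pool/example_pool')
        self.assertStatus(204)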
diff --git a/qa/tasks/mgr/dashboard/test_api.py b/qa/tasks/mgr/dashboard/test_api.py
new file mode 100644
index 000000000..22f235698
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_api.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import unittest
+
+from . import DEFAULT_API_VERSION
+from .helper import DashboardTestCase
+
+
+class VersionReqTest(DashboardTestCase, unittest.TestCase):
+ def test_version(self):
+ for (version, expected_status) in [
+ (DEFAULT_API_VERSION, 200),
+ (None, 415),
+ ("99.99", 415)
+ ]:
+ with self.subTest(version=version):
+ self._get('/api/summary', version=version)
+ self.assertStatus(expected_status)
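
The version handling that VersionReqTest exercises is carried entirely by the Accept header built in DashboardTestCase._request(). A rough equivalent with plain requests, assuming a placeholder dashboard URL and token:

import requests

BASE = 'https://mgr.example:8443'            # placeholder dashboard endpoint
headers = {
    'Authorization': 'Bearer <jwt-token>',   # placeholder token
    # DEFAULT_API_VERSION ('1.0') -> expected 200
    'Accept': 'application/vnd.ceph.api.v1.0+json',
}
requests.get(BASE + '/api/summary', headers=headers, verify=False)

# An unsupported version string is answered with 415 (Unsupported Media Type).
headers['Accept'] = 'application/vnd.ceph.api.v99.99+json'
requests.get(BASE + '/api/summary', headers=headers, verify=False)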
diff --git a/qa/tasks/mgr/dashboard/test_auth.py b/qa/tasks/mgr/dashboard/test_auth.py
new file mode 100644
index 000000000..a2266229b
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_auth.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import time
+
+import jwt
+from teuthology.orchestra.run import \
+ CommandFailedError # pylint: disable=import-error
+
+from .helper import DashboardTestCase, JLeaf, JObj
+
+
+class AuthTest(DashboardTestCase):
+
+ AUTO_AUTHENTICATE = False
+
+ def setUp(self):
+ super(AuthTest, self).setUp()
+ self.reset_session()
+
+ def _validate_jwt_token(self, token, username, permissions):
+ payload = jwt.decode(token, options={'verify_signature': False})
+ self.assertIn('username', payload)
+ self.assertEqual(payload['username'], username)
+
+ for scope, perms in permissions.items():
+ self.assertIsNotNone(scope)
+ self.assertIn('read', perms)
+ self.assertIn('update', perms)
+ self.assertIn('create', perms)
+ self.assertIn('delete', perms)
+
+ def test_login_without_password(self):
+ with self.assertRaises(CommandFailedError):
+ self.create_user('admin2', '', ['administrator'], force_password=True)
+
+ def test_a_set_login_credentials(self):
+ # test with Authorization header
+ self.create_user('admin2', 'admin2', ['administrator'])
+ self._post("/api/auth", {'username': 'admin2', 'password': 'admin2'})
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._validate_jwt_token(data['token'], "admin2", data['permissions'])
+ self.delete_user('admin2')
+
+ # test with Cookies set
+ self.create_user('admin2', 'admin2', ['administrator'])
+ self._post("/api/auth", {'username': 'admin2', 'password': 'admin2'}, set_cookies=True)
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._validate_jwt_token(data['token'], "admin2", data['permissions'])
+ self.delete_user('admin2')
+
+ def test_login_valid(self):
+ # test with Authorization header
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ 'token': JLeaf(str),
+ 'username': JLeaf(str),
+ 'permissions': JObj(sub_elems={}, allow_unknown=True),
+ 'sso': JLeaf(bool),
+ 'pwdExpirationDate': JLeaf(int, none=True),
+ 'pwdUpdateRequired': JLeaf(bool)
+ }, allow_unknown=False))
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+
+ # test with Cookies set
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ 'token': JLeaf(str),
+ 'username': JLeaf(str),
+ 'permissions': JObj(sub_elems={}, allow_unknown=True),
+ 'sso': JLeaf(bool),
+ 'pwdExpirationDate': JLeaf(int, none=True),
+ 'pwdUpdateRequired': JLeaf(bool)
+ }, allow_unknown=False))
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+
+ def test_login_invalid(self):
+ # test with Authorization header
+ self._post("/api/auth", {'username': 'admin', 'password': 'inval'})
+ self.assertStatus(400)
+ self.assertJsonBody({
+ "component": "auth",
+ "code": "invalid_credentials",
+ "detail": "Invalid credentials"
+ })
+
+ def test_lockout_user(self):
+ # test with Authorization header
+ self._ceph_cmd(['dashboard', 'set-account-lockout-attempts', '3'])
+ for _ in range(3):
+ self._post("/api/auth", {'username': 'admin', 'password': 'inval'})
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(400)
+ self.assertJsonBody({
+ "component": "auth",
+ "code": "invalid_credentials",
+ "detail": "Invalid credentials"
+ })
+ self._ceph_cmd(['dashboard', 'ac-user-enable', 'admin'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ 'token': JLeaf(str),
+ 'username': JLeaf(str),
+ 'permissions': JObj(sub_elems={}, allow_unknown=True),
+ 'sso': JLeaf(bool),
+ 'pwdExpirationDate': JLeaf(int, none=True),
+ 'pwdUpdateRequired': JLeaf(bool)
+ }, allow_unknown=False))
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+
+ # test with Cookies set
+ self._ceph_cmd(['dashboard', 'set-account-lockout-attempts', '3'])
+ for _ in range(3):
+ self._post("/api/auth", {'username': 'admin', 'password': 'inval'}, set_cookies=True)
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(400)
+ self.assertJsonBody({
+ "component": "auth",
+ "code": "invalid_credentials",
+ "detail": "Invalid credentials"
+ })
+ self._ceph_cmd(['dashboard', 'ac-user-enable', 'admin'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ 'token': JLeaf(str),
+ 'username': JLeaf(str),
+ 'permissions': JObj(sub_elems={}, allow_unknown=True),
+ 'sso': JLeaf(bool),
+ 'pwdExpirationDate': JLeaf(int, none=True),
+ 'pwdUpdateRequired': JLeaf(bool)
+ }, allow_unknown=False))
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+
+ def test_logout(self):
+ # test with Authorization header
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+ self.set_jwt_token(data['token'])
+ self._post("/api/auth/logout")
+ self.assertStatus(200)
+ self.assertJsonBody({
+ "redirect_url": "#/login"
+ })
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+ self.set_jwt_token(None)
+
+ # test with Cookies set
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._validate_jwt_token(data['token'], "admin", data['permissions'])
+ self.set_jwt_token(data['token'])
+ self._post("/api/auth/logout", set_cookies=True)
+ self.assertStatus(200)
+ self.assertJsonBody({
+ "redirect_url": "#/login"
+ })
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+ self.set_jwt_token(None)
+
+ def test_token_ttl(self):
+ # test with Authorization header
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", version='1.1')
+ self.assertStatus(200)
+ time.sleep(6)
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800'])
+ self.set_jwt_token(None)
+
+ # test with Cookies set
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(200)
+ time.sleep(6)
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800'])
+ self.set_jwt_token(None)
+
+ def test_remove_from_blocklist(self):
+ # test with Authorization header
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ # the following call adds the token to the blocklist
+ self._post("/api/auth/logout")
+ self.assertStatus(200)
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+ time.sleep(6)
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800'])
+ self.set_jwt_token(None)
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'})
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ # the following call removes expired tokens from the blocklist
+ self._post("/api/auth/logout")
+ self.assertStatus(200)
+
+ # test with Cookies set
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5'])
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ # the following call adds the token to the blocklist
+ self._post("/api/auth/logout", set_cookies=True)
+ self.assertStatus(200)
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+ time.sleep(6)
+ self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800'])
+ self.set_jwt_token(None)
+ self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True)
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ # the following call removes expired tokens from the blocklist
+ self._post("/api/auth/logout", set_cookies=True)
+ self.assertStatus(200)
+
+ def test_unauthorized(self):
+ # test with Authorization header
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+
+ # test with Cookies set
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+
+ def test_invalidate_token_by_admin(self):
+ # test with Authorization header
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+ self.create_user('user', 'user', ['read-only'])
+ time.sleep(1)
+ self._post("/api/auth", {'username': 'user', 'password': 'user'})
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", version='1.1')
+ self.assertStatus(200)
+ time.sleep(1)
+ self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password',
+ 'user'],
+ 'user2')
+ time.sleep(1)
+ self._get("/api/host", version='1.1')
+ self.assertStatus(401)
+ self.set_jwt_token(None)
+ self._post("/api/auth", {'username': 'user', 'password': 'user2'})
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", version='1.1')
+ self.assertStatus(200)
+ self.delete_user("user")
+
+ # test with Cookies set
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+ self.create_user('user', 'user', ['read-only'])
+ time.sleep(1)
+ self._post("/api/auth", {'username': 'user', 'password': 'user'}, set_cookies=True)
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(200)
+ time.sleep(1)
+ self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', '--force-password',
+ 'user'],
+ 'user2')
+ time.sleep(1)
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(401)
+ self.set_jwt_token(None)
+ self._post("/api/auth", {'username': 'user', 'password': 'user2'}, set_cookies=True)
+ self.assertStatus(201)
+ self.set_jwt_token(self.jsonBody()['token'])
+ self._get("/api/host", set_cookies=True, version='1.1')
+ self.assertStatus(200)
+ self.delete_user("user")
+
+ def test_check_token(self):
+ # test with Authorization header
+ self.login("admin", "admin")
+ self._post("/api/auth/check", {"token": self.jsonBody()["token"]})
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ "username": JLeaf(str),
+ "permissions": JObj(sub_elems={}, allow_unknown=True),
+ "sso": JLeaf(bool),
+ "pwdUpdateRequired": JLeaf(bool)
+ }, allow_unknown=False))
+ self.logout()
+
+ # test with Cookies set
+ self.login("admin", "admin", set_cookies=True)
+ self._post("/api/auth/check", {"token": self.jsonBody()["token"]}, set_cookies=True)
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ "username": JLeaf(str),
+ "permissions": JObj(sub_elems={}, allow_unknown=True),
+ "sso": JLeaf(bool),
+ "pwdUpdateRequired": JLeaf(bool)
+ }, allow_unknown=False))
+ self.logout(set_cookies=True)
+
+ def test_check_wo_token(self):
+ # test with Authorization header
+ self.login("admin", "admin")
+ self._post("/api/auth/check", {"token": ""})
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ "login_url": JLeaf(str),
+ "cluster_status": JLeaf(str)
+ }, allow_unknown=False))
+ self.logout()
+
+ # test with Cookies set
+ self.login("admin", "admin", set_cookies=True)
+ self._post("/api/auth/check", {"token": ""}, set_cookies=True)
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ "login_url": JLeaf(str),
+ "cluster_status": JLeaf(str)
+ }, allow_unknown=False))
+ self.logout(set_cookies=True)
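
The assertions above read the dashboard's JWT claims without verifying the signature, exactly as _validate_jwt_token() does. A minimal sketch, assuming a placeholder token string:

import jwt

token = '<token returned by POST /api/auth>'  # placeholder
payload = jwt.decode(token, options={'verify_signature': False})
print(payload['username'])   # claim asserted by _validate_jwt_token()
print(payload.get('exp'))    # expiry claim, if present; the TTL tests above
                             # tune it via 'dashboard set-jwt-token-ttl'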
diff --git a/qa/tasks/mgr/dashboard/test_cephfs.py b/qa/tasks/mgr/dashboard/test_cephfs.py
new file mode 100644
index 000000000..4295b580f
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_cephfs.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-public-methods
+
+from contextlib import contextmanager
+
+from .helper import DashboardTestCase, JLeaf, JList, JObj
+
+
+class CephfsTest(DashboardTestCase):
+ CEPHFS = True
+
+ AUTH_ROLES = ['cephfs-manager']
+
+ QUOTA_PATH = '/quotas'
+
+ def assertToHave(self, data, key):
+ self.assertIn(key, data)
+ self.assertIsNotNone(data[key])
+
+ def get_fs_id(self):
+ return self.fs.get_namespace_id()
+
+ def mk_dirs(self, path, expectedStatus=200):
+ self._post("/api/cephfs/{}/tree".format(self.get_fs_id()),
+ params={'path': path})
+ self.assertStatus(expectedStatus)
+
+ def rm_dir(self, path, expectedStatus=200):
+ self._delete("/api/cephfs/{}/tree".format(self.get_fs_id()),
+ params={'path': path})
+ self.assertStatus(expectedStatus)
+
+ def get_root_directory(self, expectedStatus=200):
+ data = self._get("/api/cephfs/{}/get_root_directory".format(self.get_fs_id()))
+ self.assertStatus(expectedStatus)
+ self.assertIsInstance(data, dict)
+ return data
+
+ def ls_dir(self, path, expectedLength, depth=None):
+ return self._ls_dir(path, expectedLength, depth, "api")
+
+ def ui_ls_dir(self, path, expectedLength, depth=None):
+ return self._ls_dir(path, expectedLength, depth, "ui-api")
+
+ def _ls_dir(self, path, expectedLength, depth, baseApiPath):
+ params = {'path': path}
+ if depth is not None:
+ params['depth'] = depth
+ data = self._get("/{}/cephfs/{}/ls_dir".format(baseApiPath, self.get_fs_id()),
+ params=params)
+ self.assertStatus(200)
+ self.assertIsInstance(data, list)
+ self.assertEqual(len(data), expectedLength)
+ return data
+
+ def set_quotas(self, max_bytes=None, max_files=None):
+ quotas = {
+ 'max_bytes': max_bytes,
+ 'max_files': max_files
+ }
+ self._put("/api/cephfs/{}/quota".format(self.get_fs_id()), data=quotas,
+ params={'path': self.QUOTA_PATH})
+ self.assertStatus(200)
+
+ def assert_quotas(self, max_bytes, files):
+ data = self.ls_dir('/', 1)[0]
+ self.assertEqual(data['quotas']['max_bytes'], max_bytes)
+ self.assertEqual(data['quotas']['max_files'], files)
+
+ @contextmanager
+ def new_quota_dir(self):
+ self.mk_dirs(self.QUOTA_PATH)
+ self.set_quotas(1024 ** 3, 1024)
+ yield 1
+ self.rm_dir(self.QUOTA_PATH)
+
+ @DashboardTestCase.RunAs('test', 'test', ['block-manager'])
+ def test_access_permissions(self):
+ fs_id = self.get_fs_id()
+ self._get("/api/cephfs/{}/clients".format(fs_id))
+ self.assertStatus(403)
+ self._get("/api/cephfs/{}".format(fs_id))
+ self.assertStatus(403)
+ self._get("/api/cephfs/{}/mds_counters".format(fs_id))
+ self.assertStatus(403)
+ self._get("/ui-api/cephfs/{}/tabs".format(fs_id))
+ self.assertStatus(403)
+
+ def test_cephfs_clients(self):
+ fs_id = self.get_fs_id()
+ data = self._get("/api/cephfs/{}/clients".format(fs_id))
+ self.assertStatus(200)
+
+ self.assertIn('status', data)
+ self.assertIn('data', data)
+
+ def test_cephfs_evict_client_does_not_exist(self):
+ fs_id = self.get_fs_id()
+ self._delete("/api/cephfs/{}/client/1234".format(fs_id))
+ self.assertStatus(404)
+
+ def test_cephfs_evict_invalid_client_id(self):
+ fs_id = self.get_fs_id()
+ self._delete("/api/cephfs/{}/client/xyz".format(fs_id))
+ self.assertStatus(400)
+ self.assertJsonBody({
+ "component": 'cephfs',
+ "code": "invalid_cephfs_client_id",
+ "detail": "Invalid cephfs client ID xyz"
+ })
+
+ def test_cephfs_get(self):
+ fs_id = self.get_fs_id()
+ data = self._get("/api/cephfs/{}/".format(fs_id))
+ self.assertStatus(200)
+
+ self.assertToHave(data, 'cephfs')
+ self.assertToHave(data, 'standbys')
+ self.assertToHave(data, 'versions')
+
+ def test_cephfs_mds_counters(self):
+ fs_id = self.get_fs_id()
+ data = self._get("/api/cephfs/{}/mds_counters".format(fs_id))
+ self.assertStatus(200)
+
+ self.assertIsInstance(data, dict)
+ self.assertIsNotNone(data)
+
+ def test_cephfs_mds_counters_wrong(self):
+ self._get("/api/cephfs/baadbaad/mds_counters")
+ self.assertStatus(400)
+ self.assertJsonBody({
+ "component": 'cephfs',
+ "code": "invalid_cephfs_id",
+ "detail": "Invalid cephfs ID baadbaad"
+ })
+
+ def test_cephfs_list(self):
+ data = self._get("/api/cephfs/")
+ self.assertStatus(200)
+
+ self.assertIsInstance(data, list)
+ cephfs = data[0]
+ self.assertToHave(cephfs, 'id')
+ self.assertToHave(cephfs, 'mdsmap')
+
+ def test_cephfs_get_quotas(self):
+ fs_id = self.get_fs_id()
+ data = self._get("/api/cephfs/{}/quota?path=/".format(fs_id))
+ self.assertStatus(200)
+ self.assertSchema(data, JObj({
+ 'max_bytes': int,
+ 'max_files': int
+ }))
+
+ def test_cephfs_tabs(self):
+ fs_id = self.get_fs_id()
+ data = self._get("/ui-api/cephfs/{}/tabs".format(fs_id))
+ self.assertStatus(200)
+ self.assertIsInstance(data, dict)
+
+ # Pools
+ pools = data['pools']
+ self.assertIsInstance(pools, list)
+ self.assertGreater(len(pools), 0)
+ for pool in pools:
+ self.assertEqual(pool['size'], pool['used'] + pool['avail'])
+
+ # Ranks
+ self.assertToHave(data, 'ranks')
+ self.assertIsInstance(data['ranks'], list)
+
+ # Name
+ self.assertToHave(data, 'name')
+ self.assertIsInstance(data['name'], str)
+
+ # Standbys
+ self.assertToHave(data, 'standbys')
+ self.assertIsInstance(data['standbys'], str)
+
+ # MDS counters
+ counters = data['mds_counters']
+ self.assertIsInstance(counters, dict)
+ self.assertGreater(len(counters.keys()), 0)
+ for k, v in counters.items():
+ self.assertEqual(v['name'], k)
+
+ # Clients
+ self.assertToHave(data, 'clients')
+ clients = data['clients']
+ self.assertToHave(clients, 'data')
+ self.assertIsInstance(clients['data'], list)
+ self.assertToHave(clients, 'status')
+ self.assertIsInstance(clients['status'], int)
+
+ def test_ls_mk_rm_dir(self):
+ self.ls_dir('/', 0)
+
+ self.mk_dirs('/pictures/birds')
+ self.ls_dir('/', 2, 3)
+ self.ls_dir('/pictures', 1)
+
+ self.rm_dir('/pictures', 500)
+ self.rm_dir('/pictures/birds')
+ self.rm_dir('/pictures')
+
+ self.ls_dir('/', 0)
+
+ def test_snapshots(self):
+ fs_id = self.get_fs_id()
+ self.mk_dirs('/movies/dune/extended_version')
+
+ self._post("/api/cephfs/{}/snapshot".format(fs_id),
+ params={'path': '/movies/dune', 'name': 'test'})
+ self.assertStatus(200)
+
+ data = self.ls_dir('/movies', 1)
+ self.assertSchema(data[0], JObj(sub_elems={
+ 'name': JLeaf(str),
+ 'path': JLeaf(str),
+ 'parent': JLeaf(str),
+ 'snapshots': JList(JObj(sub_elems={
+ 'name': JLeaf(str),
+ 'path': JLeaf(str),
+ 'created': JLeaf(str)
+ })),
+ 'quotas': JObj(sub_elems={
+ 'max_bytes': JLeaf(int),
+ 'max_files': JLeaf(int)
+ })
+ }))
+ snapshots = data[0]['snapshots']
+ self.assertEqual(len(snapshots), 1)
+ snapshot = snapshots[0]
+ self.assertEqual(snapshot['name'], "test")
+ self.assertEqual(snapshot['path'], "/movies/dune/.snap/test")
+
+ # Should have filtered out "_test_$timestamp"
+ data = self.ls_dir('/movies/dune', 1)
+ snapshots = data[0]['snapshots']
+ self.assertEqual(len(snapshots), 0)
+
+ self._delete("/api/cephfs/{}/snapshot".format(fs_id),
+ params={'path': '/movies/dune', 'name': 'test'})
+ self.assertStatus(200)
+
+ data = self.ls_dir('/movies', 1)
+ self.assertEqual(len(data[0]['snapshots']), 0)
+
+ # Cleanup. Note, the CephFS Python extension (and therefore the Dashboard
+ # REST API) does not support recursive deletion of a directory.
+ self.rm_dir('/movies/dune/extended_version')
+ self.rm_dir('/movies/dune')
+ self.rm_dir('/movies')
+
+ def test_quotas_default(self):
+ self.mk_dirs(self.QUOTA_PATH)
+ self.assert_quotas(0, 0)
+ self.rm_dir(self.QUOTA_PATH)
+
+ def test_quotas_set_both(self):
+ with self.new_quota_dir():
+ self.assert_quotas(1024 ** 3, 1024)
+
+ def test_quotas_set_only_bytes(self):
+ with self.new_quota_dir():
+ self.set_quotas(2048 ** 3)
+ self.assert_quotas(2048 ** 3, 1024)
+
+ def test_quotas_set_only_files(self):
+ with self.new_quota_dir():
+ self.set_quotas(None, 2048)
+ self.assert_quotas(1024 ** 3, 2048)
+
+ def test_quotas_unset_both(self):
+ with self.new_quota_dir():
+ self.set_quotas(0, 0)
+ self.assert_quotas(0, 0)
+
+ def test_listing_of_root_dir(self):
+ self.ls_dir('/', 0) # Should not list root
+ ui_root = self.ui_ls_dir('/', 1)[0] # Should list root by default
+ root = self.get_root_directory()
+ self.assertEqual(ui_root, root)
+
+ def test_listing_of_ui_api_ls_on_deeper_levels(self):
+ # The UI-API and API ls_dir methods should behave the same way on deeper levels
+ self.mk_dirs('/pictures')
+ api_ls = self.ls_dir('/pictures', 0)
+ ui_api_ls = self.ui_ls_dir('/pictures', 0)
+ self.assertEqual(api_ls, ui_api_ls)
+ self.rm_dir('/pictures')
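
For reference, a concrete example of the directory entry shape that test_snapshots validates through the JObj/JList schema; the values are made up, only the keys and types mirror the asserted schema:

example_ls_dir_entry = {
    'name': 'dune',
    'path': '/movies/dune',
    'parent': '/movies',
    'snapshots': [
        {'name': 'test',
         'path': '/movies/dune/.snap/test',
         'created': '2024-04-21T11:54:28.000000+0000'},  # format illustrative
    ],
    'quotas': {'max_bytes': 0, 'max_files': 0},
}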
diff --git a/qa/tasks/mgr/dashboard/test_cluster.py b/qa/tasks/mgr/dashboard/test_cluster.py
new file mode 100644
index 000000000..14f854279
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_cluster.py
@@ -0,0 +1,23 @@
+from .helper import DashboardTestCase, JLeaf, JObj
+
+
+class ClusterTest(DashboardTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.reset_session()
+
+ def test_get_status(self):
+ data = self._get('/api/cluster', version='0.1')
+ self.assertStatus(200)
+ self.assertSchema(data, JObj(sub_elems={
+ "status": JLeaf(str)
+ }, allow_unknown=False))
+
+ def test_update_status(self):
+ req = {'status': 'POST_INSTALLED'}
+ self._put('/api/cluster', req, version='0.1')
+ self.assertStatus(200)
+ data = self._get('/api/cluster', version='0.1')
+ self.assertStatus(200)
+ self.assertEqual(data, req)
diff --git a/qa/tasks/mgr/dashboard/test_cluster_configuration.py b/qa/tasks/mgr/dashboard/test_cluster_configuration.py
new file mode 100644
index 000000000..9c8245d23
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_cluster_configuration.py
@@ -0,0 +1,398 @@
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class ClusterConfigurationTest(DashboardTestCase):
+
+ def test_list(self):
+ data = self._get('/api/cluster_conf')
+ self.assertStatus(200)
+ self.assertIsInstance(data, list)
+ self.assertGreater(len(data), 1000)
+ for conf in data:
+ self._validate_single(conf)
+
+ def test_get(self):
+ data = self._get('/api/cluster_conf/admin_socket')
+ self.assertStatus(200)
+ self._validate_single(data)
+ self.assertIn('enum_values', data)
+
+ data = self._get('/api/cluster_conf/fantasy_name')
+ self.assertStatus(404)
+
+ def test_get_specific_db_config_option(self):
+ config_name = 'mon_allow_pool_delete'
+
+ orig_value = self._get_config_by_name(config_name)
+
+ self._ceph_cmd(['config', 'set', 'mon', config_name, 'true'])
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ [{'section': 'mon', 'value': 'true'}],
+ timeout=30,
+ period=1)
+
+ self._ceph_cmd(['config', 'set', 'mon', config_name, 'false'])
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ [{'section': 'mon', 'value': 'false'}],
+ timeout=30,
+ period=1)
+
+ # restore value
+ if orig_value:
+ self._ceph_cmd(['config', 'set', 'mon', config_name, orig_value[0]['value']])
+
+ def test_filter_config_options(self):
+ config_names = ['osd_scrub_during_recovery', 'osd_scrub_begin_hour', 'osd_scrub_end_hour']
+ data = self._get('/api/cluster_conf/filter?names={}'.format(','.join(config_names)))
+ self.assertStatus(200)
+ self.assertIsInstance(data, list)
+ self.assertEqual(len(data), 3)
+ for conf in data:
+ self._validate_single(conf)
+ self.assertIn(conf['name'], config_names)
+
+ def test_filter_config_options_empty_names(self):
+ self._get('/api/cluster_conf/filter?names=')
+ self.assertStatus(404)
+ self.assertEqual(self._resp.json()['detail'], 'Config options `` not found')
+
+ def test_filter_config_options_unknown_name(self):
+ self._get('/api/cluster_conf/filter?names=abc')
+ self.assertStatus(404)
+ self.assertEqual(self._resp.json()['detail'], 'Config options `abc` not found')
+
+ def test_filter_config_options_contains_unknown_name(self):
+ config_names = ['osd_scrub_during_recovery', 'osd_scrub_begin_hour', 'abc']
+ data = self._get('/api/cluster_conf/filter?names={}'.format(','.join(config_names)))
+ self.assertStatus(200)
+ self.assertIsInstance(data, list)
+ self.assertEqual(len(data), 2)
+ for conf in data:
+ self._validate_single(conf)
+ self.assertIn(conf['name'], config_names)
+
+ def test_create(self):
+ config_name = 'debug_ms'
+ orig_value = self._get_config_by_name(config_name)
+
+ # remove all existing settings for equal preconditions
+ self._clear_all_values_for_config_option(config_name)
+
+ expected_result = [{'section': 'mon', 'value': '0/3'}]
+
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': expected_result
+ })
+ self.assertStatus(201)
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ expected_result,
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_value)
+
+ def test_delete(self):
+ config_name = 'debug_ms'
+ orig_value = self._get_config_by_name(config_name)
+
+ # set a config option
+ expected_result = [{'section': 'mon', 'value': '0/3'}]
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': expected_result
+ })
+ self.assertStatus(201)
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ expected_result,
+ timeout=30,
+ period=1)
+
+ # delete it and check if it's deleted
+ self._delete('/api/cluster_conf/{}?section={}'.format(config_name, 'mon'))
+ self.assertStatus(204)
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ None,
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_value)
+
+ def test_create_cant_update_at_runtime(self):
+ config_name = 'public_bind_addr' # not updatable
+ config_value = [{'section': 'global', 'value': 'true'}]
+ orig_value = self._get_config_by_name(config_name)
+
+ # try to set config option and check if it fails
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': config_value
+ })
+ self.assertStatus(400)
+ self.assertError(code='config_option_not_updatable_at_runtime',
+ component='cluster_configuration',
+ detail='Config option {} is/are not updatable at runtime'.format(
+ config_name))
+
+ # check if config option value is still the original one
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ orig_value,
+ timeout=30,
+ period=1)
+
+ def test_create_two_values(self):
+ config_name = 'debug_ms'
+ orig_value = self._get_config_by_name(config_name)
+
+ # remove all existing settings for equal preconditions
+ self._clear_all_values_for_config_option(config_name)
+
+ expected_result = [{'section': 'mon', 'value': '0/3'},
+ {'section': 'osd', 'value': '0/5'}]
+
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': expected_result
+ })
+ self.assertStatus(201)
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ expected_result,
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_value)
+
+ def test_create_can_handle_none_values(self):
+ config_name = 'debug_ms'
+ orig_value = self._get_config_by_name(config_name)
+
+ # remove all existing settings for equal preconditions
+ self._clear_all_values_for_config_option(config_name)
+
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': [{'section': 'mon', 'value': '0/3'},
+ {'section': 'osd', 'value': None}]
+ })
+ self.assertStatus(201)
+
+ expected_result = [{'section': 'mon', 'value': '0/3'}]
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ expected_result,
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_value)
+
+ def test_create_can_handle_boolean_values(self):
+ config_name = 'mon_allow_pool_delete'
+ orig_value = self._get_config_by_name(config_name)
+
+ # remove all existing settings for equal preconditions
+ self._clear_all_values_for_config_option(config_name)
+
+ expected_result = [{'section': 'mon', 'value': 'true'}]
+
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': [{'section': 'mon', 'value': True}]})
+ self.assertStatus(201)
+
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ expected_result,
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_value)
+
+ def test_bulk_set(self):
+ expected_result = {
+ 'osd_max_backfills': {'section': 'osd', 'value': '1'},
+ 'osd_recovery_max_active': {'section': 'osd', 'value': '3'},
+ 'osd_recovery_max_single_start': {'section': 'osd', 'value': '1'},
+ 'osd_recovery_sleep': {'section': 'osd', 'value': '2.000000'}
+ }
+ orig_values = dict()
+
+ for config_name in expected_result:
+ orig_values[config_name] = self._get_config_by_name(config_name)
+
+ # remove all existing settings for equal preconditions
+ self._clear_all_values_for_config_option(config_name)
+
+ self._put('/api/cluster_conf', {'options': expected_result})
+ self.assertStatus(200)
+
+ for config_name, value in expected_result.items():
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ [value],
+ timeout=30,
+ period=1)
+
+ # reset original value
+ self._clear_all_values_for_config_option(config_name)
+ self._reset_original_values(config_name, orig_values[config_name])
+
+ def test_bulk_set_cant_update_at_runtime(self):
+ config_options = {
+ 'public_bind_addr': {'section': 'global', 'value': '1.2.3.4:567'}, # not updatable
+ 'public_network': {'section': 'global', 'value': '10.0.0.0/8'} # not updatable
+ }
+ orig_values = dict()
+
+ for config_name in config_options:
+ orig_values[config_name] = self._get_config_by_name(config_name)
+
+ # try to set config options and see if it fails
+ self._put('/api/cluster_conf', {'options': config_options})
+ self.assertStatus(400)
+ self.assertError(code='config_option_not_updatable_at_runtime',
+ component='cluster_configuration',
+ detail='Config option {} is/are not updatable at runtime'.format(
+ ', '.join(config_options.keys())))
+
+ # check if config option values are still the original ones
+ for config_name, value in orig_values.items():
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ value,
+ timeout=30,
+ period=1)
+
+ def test_bulk_set_cant_update_at_runtime_partial(self):
+ config_options = {
+ 'public_bind_addr': {'section': 'global', 'value': 'true'}, # not updatable
+ 'log_to_stderr': {'section': 'global', 'value': 'true'} # updatable
+ }
+ orig_values = dict()
+
+ for config_name in config_options:
+ orig_values[config_name] = self._get_config_by_name(config_name)
+
+ # try to set config options and see if it fails
+ self._put('/api/cluster_conf', {'options': config_options})
+ self.assertStatus(400)
+ self.assertError(code='config_option_not_updatable_at_runtime',
+ component='cluster_configuration',
+ detail='Config option {} is/are not updatable at runtime'.format(
+ 'public_bind_addr'))
+
+ # check if config option values are still the original ones
+ for config_name, value in orig_values.items():
+ self.wait_until_equal(
+ lambda: self._get_config_by_name(config_name),
+ value,
+ timeout=30,
+ period=1)
+
+ def test_check_existence(self):
+ """
+        This test case checks that every hard-coded config option used by the dashboard exists.
+        If you add further hard-coded options to the dashboard, feel free to add them to the
+        list below.
+ """
+ hard_coded_options = [
+ 'osd_max_backfills', # osd-recv-speed
+ 'osd_recovery_max_active', # osd-recv-speed
+ 'osd_recovery_max_single_start', # osd-recv-speed
+ 'osd_recovery_sleep', # osd-recv-speed
+ 'osd_scrub_during_recovery', # osd-pg-scrub
+ 'osd_scrub_begin_hour', # osd-pg-scrub
+ 'osd_scrub_end_hour', # osd-pg-scrub
+ 'osd_scrub_begin_week_day', # osd-pg-scrub
+ 'osd_scrub_end_week_day', # osd-pg-scrub
+ 'osd_scrub_min_interval', # osd-pg-scrub
+ 'osd_scrub_max_interval', # osd-pg-scrub
+ 'osd_deep_scrub_interval', # osd-pg-scrub
+ 'osd_scrub_auto_repair', # osd-pg-scrub
+ 'osd_max_scrubs', # osd-pg-scrub
+ 'osd_scrub_priority', # osd-pg-scrub
+ 'osd_scrub_sleep', # osd-pg-scrub
+ 'osd_scrub_auto_repair_num_errors', # osd-pg-scrub
+ 'osd_debug_deep_scrub_sleep', # osd-pg-scrub
+ 'osd_deep_scrub_keys', # osd-pg-scrub
+ 'osd_deep_scrub_large_omap_object_key_threshold', # osd-pg-scrub
+ 'osd_deep_scrub_large_omap_object_value_sum_threshold', # osd-pg-scrub
+ 'osd_deep_scrub_randomize_ratio', # osd-pg-scrub
+ 'osd_deep_scrub_stride', # osd-pg-scrub
+ 'osd_deep_scrub_update_digest_min_age', # osd-pg-scrub
+ 'osd_requested_scrub_priority', # osd-pg-scrub
+ 'osd_scrub_backoff_ratio', # osd-pg-scrub
+ 'osd_scrub_chunk_max', # osd-pg-scrub
+ 'osd_scrub_chunk_min', # osd-pg-scrub
+ 'osd_scrub_cost', # osd-pg-scrub
+ 'osd_scrub_interval_randomize_ratio', # osd-pg-scrub
+ 'osd_scrub_invalid_stats', # osd-pg-scrub
+ 'osd_scrub_load_threshold', # osd-pg-scrub
+ 'osd_scrub_max_preemptions', # osd-pg-scrub
+ 'mon_allow_pool_delete' # pool-list
+ ]
+
+ for config_option in hard_coded_options:
+ self._get('/api/cluster_conf/{}'.format(config_option))
+ self.assertStatus(200)
+
+ def _validate_single(self, data):
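+        # Every config option returned by the API is expected to expose this common set of
+        # metadata fields.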
+ self.assertIn('name', data)
+ self.assertIn('daemon_default', data)
+ self.assertIn('long_desc', data)
+ self.assertIn('level', data)
+ self.assertIn('default', data)
+ self.assertIn('see_also', data)
+ self.assertIn('tags', data)
+ self.assertIn('min', data)
+ self.assertIn('max', data)
+ self.assertIn('services', data)
+ self.assertIn('type', data)
+ self.assertIn('desc', data)
+ self.assertIn(data['type'], ['str', 'bool', 'float', 'int', 'size', 'uint', 'addr',
+ 'addrvec', 'uuid', 'secs', 'millisecs'])
+
+ if 'value' in data:
+ self.assertIn('source', data)
+ self.assertIsInstance(data['value'], list)
+
+ for entry in data['value']:
+ self.assertIsInstance(entry, dict)
+ self.assertIn('section', entry)
+ self.assertIn('value', entry)
+
+ def _get_config_by_name(self, conf_name):
+ data = self._get('/api/cluster_conf/{}'.format(conf_name))
+ if 'value' in data:
+ return data['value']
+ return None
+
+ def _clear_all_values_for_config_option(self, config_name):
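+        # Drop the option from every section it is currently set in.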
+ values = self._get_config_by_name(config_name)
+ if values:
+ for value in values:
+ self._ceph_cmd(['config', 'rm', value['section'], config_name])
+
+ def _reset_original_values(self, config_name, orig_values):
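+        # Restore the values captured before the test, one section at a time.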
+ if orig_values:
+ for value in orig_values:
+ self._ceph_cmd(['config', 'set', value['section'], config_name, value['value']])
diff --git a/qa/tasks/mgr/dashboard/test_crush_rule.py b/qa/tasks/mgr/dashboard/test_crush_rule.py
new file mode 100644
index 000000000..aa2250b1d
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_crush_rule.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JList, JObj
+
+
+class CrushRuleTest(DashboardTestCase):
+
+ AUTH_ROLES = ['pool-manager']
+
+ rule_schema = JObj(sub_elems={
+ 'rule_id': int,
+ 'rule_name': str,
+ 'steps': JList(JObj({}, allow_unknown=True))
+ }, allow_unknown=True)
+
+ def create_and_delete_rule(self, data):
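+        # Round-trip helper: create the rule, fetch it through the v2.0 API to verify it,
+        # then delete it.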
+ name = data['name']
+ # Creates rule
+ self._post('/api/crush_rule', data)
+ self.assertStatus(201)
+ # Makes sure rule exists
+ rule = self._get('/api/crush_rule/{}'.format(name), version='2.0')
+ self.assertStatus(200)
+ self.assertSchemaBody(self.rule_schema)
+ self.assertEqual(rule['rule_name'], name)
+ # Deletes rule
+ self._delete('/api/crush_rule/{}'.format(name))
+ self.assertStatus(204)
+
+ @DashboardTestCase.RunAs('test', 'test', ['rgw-manager'])
+ def test_read_access_permissions(self):
+ self._get('/api/crush_rule', version='2.0')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', ['read-only'])
+ def test_write_access_permissions(self):
+ self._get('/api/crush_rule', version='2.0')
+ self.assertStatus(200)
+ data = {'name': 'some_rule', 'root': 'default', 'failure_domain': 'osd'}
+ self._post('/api/crush_rule', data)
+ self.assertStatus(403)
+ self._delete('/api/crush_rule/default')
+ self.assertStatus(403)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(CrushRuleTest, cls).tearDownClass()
+ cls._ceph_cmd(['osd', 'crush', 'rule', 'rm', 'some_rule'])
+ cls._ceph_cmd(['osd', 'crush', 'rule', 'rm', 'another_rule'])
+
+ def test_list(self):
+ self._get('/api/crush_rule', version='2.0')
+ self.assertStatus(200)
+ self.assertSchemaBody(JList(self.rule_schema))
+
+ def test_create(self):
+ self.create_and_delete_rule({
+ 'name': 'some_rule',
+ 'root': 'default',
+ 'failure_domain': 'osd'
+ })
+
+ @DashboardTestCase.RunAs('test', 'test', ['pool-manager', 'cluster-manager'])
+ def test_create_with_ssd(self):
+ data = self._get('/api/osd/0')
+ self.assertStatus(200)
+ device_class = data['osd_metadata']['default_device_class']
+ self.create_and_delete_rule({
+ 'name': 'another_rule',
+ 'root': 'default',
+ 'failure_domain': 'osd',
+ 'device_class': device_class
+ })
+
+ def test_crush_rule_info(self):
+ self._get('/ui-api/crush_rule/info')
+ self.assertStatus(200)
+ self.assertSchemaBody(JObj({
+ 'names': JList(str),
+ 'nodes': JList(JObj({}, allow_unknown=True)),
+ 'roots': JList(int)
+ }))
diff --git a/qa/tasks/mgr/dashboard/test_erasure_code_profile.py b/qa/tasks/mgr/dashboard/test_erasure_code_profile.py
new file mode 100644
index 000000000..7fb7c1c82
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_erasure_code_profile.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JList, JObj
+
+
+class ECPTest(DashboardTestCase):
+
+ AUTH_ROLES = ['pool-manager']
+
+ @DashboardTestCase.RunAs('test', 'test', ['rgw-manager'])
+ def test_read_access_permissions(self):
+ self._get('/api/erasure_code_profile')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', ['read-only'])
+ def test_write_access_permissions(self):
+ self._get('/api/erasure_code_profile')
+ self.assertStatus(200)
+ data = {'name': 'ecp32', 'k': 3, 'm': 2}
+ self._post('/api/erasure_code_profile', data)
+ self.assertStatus(403)
+ self._delete('/api/erasure_code_profile/default')
+ self.assertStatus(403)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ECPTest, cls).tearDownClass()
+ cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecp32'])
+ cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'lrc'])
+
+ def test_list(self):
+ data = self._get('/api/erasure_code_profile')
+ self.assertStatus(200)
+
+ default = [p for p in data if p['name'] == 'default']
+ if default:
+ default_ecp = {
+ 'k': 2,
+ 'technique': 'reed_sol_van',
+ 'm': 1,
+ 'name': 'default',
+ 'plugin': 'jerasure'
+ }
+ if 'crush-failure-domain' in default[0]:
+ default_ecp['crush-failure-domain'] = default[0]['crush-failure-domain']
+ self.assertSubset(default_ecp, default[0])
+ get_data = self._get('/api/erasure_code_profile/default')
+ self.assertEqual(get_data, default[0])
+
+ def test_create(self):
+ data = {'name': 'ecp32', 'k': 3, 'm': 2}
+ self._post('/api/erasure_code_profile', data)
+ self.assertStatus(201)
+
+ self._get('/api/erasure_code_profile/ecp32')
+ self.assertJsonSubset({
+ 'crush-device-class': '',
+ 'crush-failure-domain': 'osd',
+ 'crush-root': 'default',
+ 'jerasure-per-chunk-alignment': 'false',
+ 'k': 3,
+ 'm': 2,
+ 'name': 'ecp32',
+ 'plugin': 'jerasure',
+ 'technique': 'reed_sol_van',
+ })
+
+ self.assertStatus(200)
+
+ self._delete('/api/erasure_code_profile/ecp32')
+ self.assertStatus(204)
+
+ def test_create_plugin(self):
+ data = {'name': 'lrc', 'k': '2', 'm': '2', 'l': '2', 'plugin': 'lrc'}
+ self._post('/api/erasure_code_profile', data)
+ self.assertJsonBody(None)
+ self.assertStatus(201)
+
+ self._get('/api/erasure_code_profile/lrc')
+ self.assertJsonBody({
+ 'crush-device-class': '',
+ 'crush-failure-domain': 'host',
+ 'crush-root': 'default',
+ 'k': 2,
+ 'l': '2',
+ 'm': 2,
+ 'name': 'lrc',
+ 'plugin': 'lrc'
+ })
+
+ self.assertStatus(200)
+
+ self._delete('/api/erasure_code_profile/lrc')
+ self.assertStatus(204)
+
+ def test_ecp_info(self):
+ self._get('/ui-api/erasure_code_profile/info')
+ self.assertSchemaBody(JObj({
+ 'names': JList(str),
+ 'plugins': JList(str),
+ 'directory': str,
+ 'nodes': JList(JObj({}, allow_unknown=True))
+ }))
diff --git a/qa/tasks/mgr/dashboard/test_feedback.py b/qa/tasks/mgr/dashboard/test_feedback.py
new file mode 100644
index 000000000..0ec5ac318
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_feedback.py
@@ -0,0 +1,36 @@
+import time
+
+from .helper import DashboardTestCase
+
+
+class FeedbackTest(DashboardTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls._ceph_cmd(['mgr', 'module', 'enable', 'feedback'])
+ time.sleep(10)
+
+ def test_create_api_key(self):
+ self._post('/api/feedback/api_key', {'api_key': 'testapikey'}, version='0.1')
+ self.assertStatus(201)
+
+ def test_get_api_key(self):
+ response = self._get('/api/feedback/api_key', version='0.1')
+ self.assertStatus(200)
+ self.assertEqual(response, 'testapikey')
+
+ def test_remove_api_key(self):
+ self._delete('/api/feedback/api_key', version='0.1')
+ self.assertStatus(204)
+
+ def test_issue_tracker_create_with_invalid_key(self):
+ self._post('/api/feedback', {'api_key': 'invalidapikey', 'description': 'test',
+ 'project': 'dashboard', 'subject': 'sub', 'tracker': 'bug'},
+ version='0.1')
+ self.assertStatus(400)
+
+ def test_issue_tracker_create_with_invalid_params(self):
+ self._post('/api/feedback', {'api_key': '', 'description': 'test', 'project': 'xyz',
+ 'subject': 'testsub', 'tracker': 'invalid'}, version='0.1')
+ self.assertStatus(400)
diff --git a/qa/tasks/mgr/dashboard/test_health.py b/qa/tasks/mgr/dashboard/test_health.py
new file mode 100644
index 000000000..b6ffade4c
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_health.py
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import (DashboardTestCase, JAny, JLeaf, JList, JObj,
+ addrvec_schema, module_options_schema)
+
+
+class HealthTest(DashboardTestCase):
+ CEPHFS = True
+
+ __pg_info_schema = JObj({
+ 'object_stats': JObj({
+ 'num_objects': int,
+ 'num_object_copies': int,
+ 'num_objects_degraded': int,
+ 'num_objects_misplaced': int,
+ 'num_objects_unfound': int
+ }),
+ 'pgs_per_osd': float,
+ 'statuses': JObj({}, allow_unknown=True, unknown_schema=int)
+ })
+
+ __mdsmap_schema = JObj({
+ 'session_autoclose': int,
+ 'balancer': str,
+ 'bal_rank_mask': str,
+ 'up': JObj({}, allow_unknown=True),
+ 'last_failure_osd_epoch': int,
+ 'in': JList(int),
+ 'last_failure': int,
+ 'max_file_size': int,
+ 'explicitly_allowed_features': int,
+ 'damaged': JList(int),
+ 'tableserver': int,
+ 'failed': JList(int),
+ 'metadata_pool': int,
+ 'epoch': int,
+ 'stopped': JList(int),
+ 'max_mds': int,
+ 'compat': JObj({
+ 'compat': JObj({}, allow_unknown=True),
+ 'ro_compat': JObj({}, allow_unknown=True),
+ 'incompat': JObj({}, allow_unknown=True)
+ }),
+ 'required_client_features': JObj({}, allow_unknown=True),
+ 'data_pools': JList(int),
+ 'info': JObj({}, allow_unknown=True),
+ 'fs_name': str,
+ 'created': str,
+ 'standby_count_wanted': int,
+ 'enabled': bool,
+ 'modified': str,
+ 'session_timeout': int,
+ 'flags': int,
+ 'flags_state': JObj({
+ 'joinable': bool,
+ 'allow_snaps': bool,
+ 'allow_multimds_snaps': bool,
+ 'allow_standby_replay': bool,
+ 'refuse_client_session': bool
+ }),
+ 'ever_allowed_features': int,
+ 'root': int
+ })
+
+ def test_minimal_health(self):
+ data = self._get('/api/health/minimal')
+ self.assertStatus(200)
+ schema = JObj({
+ 'client_perf': JObj({
+ 'read_bytes_sec': int,
+ 'read_op_per_sec': int,
+ 'recovering_bytes_per_sec': int,
+ 'write_bytes_sec': int,
+ 'write_op_per_sec': int
+ }),
+ 'df': JObj({
+ 'stats': JObj({
+ 'total_avail_bytes': int,
+ 'total_bytes': int,
+ 'total_used_raw_bytes': int,
+ })
+ }),
+ 'fs_map': JObj({
+ 'filesystems': JList(
+ JObj({
+ 'mdsmap': self.__mdsmap_schema
+ }),
+ ),
+ 'standbys': JList(JObj({}, allow_unknown=True)),
+ }),
+ 'health': JObj({
+ 'checks': JList(JObj({}, allow_unknown=True)),
+ 'mutes': JList(JObj({}, allow_unknown=True)),
+ 'status': str,
+ }),
+ 'hosts': int,
+ 'iscsi_daemons': JObj({
+ 'up': int,
+ 'down': int
+ }),
+ 'mgr_map': JObj({
+ 'active_name': str,
+ 'standbys': JList(JLeaf(dict))
+ }),
+ 'mon_status': JObj({
+ 'monmap': JObj({
+ 'mons': JList(JLeaf(dict)),
+ }),
+ 'quorum': JList(int)
+ }),
+ 'osd_map': JObj({
+ 'osds': JList(
+ JObj({
+ 'in': int,
+ 'up': int,
+ 'state': JList(str)
+ })),
+ }),
+ 'pg_info': self.__pg_info_schema,
+ 'pools': JList(JLeaf(dict)),
+ 'rgw': int,
+ 'scrub_status': str
+ })
+ self.assertSchema(data, schema)
+
+ def test_full_health(self):
+ data = self._get('/api/health/full')
+ self.assertStatus(200)
+ module_info_schema = JObj({
+ 'can_run': bool,
+ 'error_string': str,
+ 'name': str,
+ 'module_options': module_options_schema
+ })
+ schema = JObj({
+ 'client_perf': JObj({
+ 'read_bytes_sec': int,
+ 'read_op_per_sec': int,
+ 'recovering_bytes_per_sec': int,
+ 'write_bytes_sec': int,
+ 'write_op_per_sec': int
+ }),
+ 'df': JObj({
+ 'pools': JList(JObj({
+ 'stats': JObj({
+ 'stored': int,
+ 'stored_data': int,
+ 'stored_omap': int,
+ 'objects': int,
+ 'kb_used': int,
+ 'bytes_used': int,
+ 'data_bytes_used': int,
+ 'omap_bytes_used': int,
+ 'percent_used': float,
+ 'max_avail': int,
+ 'quota_objects': int,
+ 'quota_bytes': int,
+ 'dirty': int,
+ 'rd': int,
+ 'rd_bytes': int,
+ 'wr': int,
+ 'wr_bytes': int,
+ 'compress_bytes_used': int,
+ 'compress_under_bytes': int,
+ 'stored_raw': int,
+ 'avail_raw': int
+ }),
+ 'name': str,
+ 'id': int
+ })),
+ 'stats': JObj({
+ 'total_avail_bytes': int,
+ 'total_bytes': int,
+ 'total_used_bytes': int,
+ 'total_used_raw_bytes': int,
+ 'total_used_raw_ratio': float,
+ 'num_osds': int,
+ 'num_per_pool_osds': int,
+ 'num_per_pool_omap_osds': int
+ })
+ }),
+ 'fs_map': JObj({
+ 'compat': JObj({
+ 'compat': JObj({}, allow_unknown=True, unknown_schema=str),
+ 'incompat': JObj(
+ {}, allow_unknown=True, unknown_schema=str),
+ 'ro_compat': JObj(
+ {}, allow_unknown=True, unknown_schema=str)
+ }),
+ 'default_fscid': int,
+ 'epoch': int,
+ 'feature_flags': JObj(
+ {}, allow_unknown=True, unknown_schema=bool),
+ 'filesystems': JList(
+ JObj({
+ 'id': int,
+ 'mdsmap': self.__mdsmap_schema
+ }),
+ ),
+ 'standbys': JList(JObj({}, allow_unknown=True)),
+ }),
+ 'health': JObj({
+ 'checks': JList(JObj({}, allow_unknown=True)),
+ 'mutes': JList(JObj({}, allow_unknown=True)),
+ 'status': str,
+ }),
+ 'hosts': int,
+ 'iscsi_daemons': JObj({
+ 'up': int,
+ 'down': int
+ }),
+ 'mgr_map': JObj({
+ 'active_addr': str,
+ 'active_addrs': JObj({
+ 'addrvec': addrvec_schema
+ }),
+ 'active_change': str, # timestamp
+ 'active_mgr_features': int,
+ 'active_gid': int,
+ 'active_name': str,
+ 'always_on_modules': JObj({}, allow_unknown=True),
+ 'available': bool,
+ 'available_modules': JList(module_info_schema),
+ 'epoch': int,
+ 'modules': JList(str),
+ 'services': JObj(
+ {'dashboard': str}, # This module should always be present
+ allow_unknown=True, unknown_schema=str
+ ),
+ 'standbys': JList(JObj({
+ 'available_modules': JList(module_info_schema),
+ 'gid': int,
+ 'name': str,
+ 'mgr_features': int
+ }, allow_unknown=True))
+ }, allow_unknown=True),
+ 'mon_status': JObj({
+ 'election_epoch': int,
+ 'extra_probe_peers': JList(JAny(none=True)),
+ 'feature_map': JObj(
+ {}, allow_unknown=True, unknown_schema=JList(JObj({
+ 'features': str,
+ 'num': int,
+ 'release': str
+ }))
+ ),
+ 'features': JObj({
+ 'quorum_con': str,
+ 'quorum_mon': JList(str),
+ 'required_con': str,
+ 'required_mon': JList(str)
+ }),
+ 'monmap': JObj({
+ # @TODO: expand on monmap schema
+ 'mons': JList(JLeaf(dict)),
+ }, allow_unknown=True),
+ 'name': str,
+ 'outside_quorum': JList(int),
+ 'quorum': JList(int),
+ 'quorum_age': int,
+ 'rank': int,
+ 'state': str,
+ # @TODO: What type should be expected here?
+ 'sync_provider': JList(JAny(none=True)),
+ 'stretch_mode': bool
+ }),
+ 'osd_map': JObj({
+ # @TODO: define schema for crush map and osd_metadata, among
+ # others
+ 'osds': JList(
+ JObj({
+ 'in': int,
+ 'up': int,
+ }, allow_unknown=True)),
+ }, allow_unknown=True),
+ 'pg_info': self.__pg_info_schema,
+ 'pools': JList(JLeaf(dict)),
+ 'rgw': int,
+ 'scrub_status': str
+ })
+ self.assertSchema(data, schema)
+
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ self.assertEqual(len(cluster_pools), len(data['pools']))
+ for pool in data['pools']:
+ self.assertIn(pool['pool_name'], cluster_pools)
+
+ @DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
+ def test_health_permissions(self):
+ data = self._get('/api/health/full')
+ self.assertStatus(200)
+
+ schema = JObj({
+ 'client_perf': JObj({}, allow_unknown=True),
+ 'df': JObj({}, allow_unknown=True),
+ 'health': JObj({
+ 'checks': JList(JObj({}, allow_unknown=True)),
+ 'mutes': JList(JObj({}, allow_unknown=True)),
+ 'status': str
+ }),
+ 'pools': JList(JLeaf(dict)),
+ })
+ self.assertSchema(data, schema)
+
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ self.assertEqual(len(cluster_pools), len(data['pools']))
+ for pool in data['pools']:
+ self.assertIn(pool['pool_name'], cluster_pools)
diff --git a/qa/tasks/mgr/dashboard/test_host.py b/qa/tasks/mgr/dashboard/test_host.py
new file mode 100644
index 000000000..78d784473
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_host.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JList, JObj, devices_schema
+
+
+class HostControllerTest(DashboardTestCase):
+
+ AUTH_ROLES = ['read-only']
+
+ URL_HOST = '/api/host'
+ URL_UI_HOST = '/ui-api/host'
+
+ ORCHESTRATOR = True
+
+ @classmethod
+ def setUpClass(cls):
+ super(HostControllerTest, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ cmd = ['test_orchestrator', 'load_data', '-i', '-']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd, stdin='{}')
+
+ @property
+ def test_data_inventory(self):
+ return self.ORCHESTRATOR_TEST_DATA['inventory']
+
+ @property
+ def test_data_daemons(self):
+ return self.ORCHESTRATOR_TEST_DATA['daemons']
+
+ @DashboardTestCase.RunAs('test', 'test', ['block-manager'])
+ def test_access_permissions(self):
+ self._get(self.URL_HOST, version='1.1')
+ self.assertStatus(403)
+
+ def test_host_list(self):
+ data = self._get(self.URL_HOST, version='1.1')
+ self.assertStatus(200)
+
+ orch_hostnames = {inventory_node['name'] for inventory_node in
+ self.ORCHESTRATOR_TEST_DATA['inventory']}
+
+ for server in data:
+ self.assertIn('services', server)
+ self.assertIn('hostname', server)
+ self.assertIn('ceph_version', server)
+ self.assertIsNotNone(server['hostname'])
+ self.assertIsNotNone(server['ceph_version'])
+ for service in server['services']:
+ self.assertIn('type', service)
+ self.assertIn('id', service)
+ self.assertIsNotNone(service['type'])
+ self.assertIsNotNone(service['id'])
+
+ self.assertIn('sources', server)
+ in_ceph, in_orchestrator = server['sources']['ceph'], server['sources']['orchestrator']
+ if in_ceph:
+ self.assertGreaterEqual(len(server['services']), 1)
+ if not in_orchestrator:
+ self.assertNotIn(server['hostname'], orch_hostnames)
+ if in_orchestrator:
+ self.assertEqual(len(server['services']), 0)
+ self.assertIn(server['hostname'], orch_hostnames)
+
+ def test_host_list_with_sources(self):
+ data = self._get('{}?sources=orchestrator'.format(self.URL_HOST), version='1.1')
+ self.assertStatus(200)
+ test_hostnames = {inventory_node['name'] for inventory_node in
+ self.ORCHESTRATOR_TEST_DATA['inventory']}
+ resp_hostnames = {host['hostname'] for host in data}
+ self.assertEqual(test_hostnames, resp_hostnames)
+
+ data = self._get('{}?sources=ceph'.format(self.URL_HOST), version='1.1')
+ self.assertStatus(200)
+ test_hostnames = {inventory_node['name'] for inventory_node in
+ self.ORCHESTRATOR_TEST_DATA['inventory']}
+ resp_hostnames = {host['hostname'] for host in data}
+ self.assertEqual(len(test_hostnames.intersection(resp_hostnames)), 0)
+
+ def test_host_devices(self):
+ hosts = self._get('{}'.format(self.URL_HOST), version='1.1')
+ hosts = [host['hostname'] for host in hosts if host['hostname'] != '']
+ assert hosts[0]
+ data = self._get('{}/devices'.format('{}/{}'.format(self.URL_HOST, hosts[0])))
+ self.assertStatus(200)
+ self.assertSchema(data, devices_schema)
+
+ def test_host_daemons(self):
+ hosts = self._get('{}'.format(self.URL_HOST), version='1.1')
+ hosts = [host['hostname'] for host in hosts if host['hostname'] != '']
+ assert hosts[0]
+ data = self._get('{}/daemons'.format('{}/{}'.format(self.URL_HOST, hosts[0])))
+ self.assertStatus(200)
+ self.assertSchema(data, JList(JObj({
+ 'hostname': str,
+ 'daemon_id': str,
+ 'daemon_type': str
+ })))
+
+ def test_host_smart(self):
+ hosts = self._get('{}'.format(self.URL_HOST), version='1.1')
+ hosts = [host['hostname'] for host in hosts if host['hostname'] != '']
+ assert hosts[0]
+ self._get('{}/smart'.format('{}/{}'.format(self.URL_HOST, hosts[0])))
+ self.assertStatus(200)
+
+ def _validate_inventory(self, data, resp_data):
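+        # Compare an orchestrator test-data node with the REST response, matching devices by path.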
+ self.assertEqual(data['name'], resp_data['name'])
+ self.assertEqual(len(data['devices']), len(resp_data['devices']))
+
+ if not data['devices']:
+ return
+ test_devices = sorted(data['devices'], key=lambda d: d['path'])
+ resp_devices = sorted(resp_data['devices'], key=lambda d: d['path'])
+
+ for test, resp in zip(test_devices, resp_devices):
+ self._validate_device(test, resp)
+
+ def _validate_device(self, data, resp_data):
+ for key, value in data.items():
+ self.assertEqual(value, resp_data[key])
+
+ def test_inventory_get(self):
+        # get the inventory of a single host
+ node = self.test_data_inventory[0]
+ resp = self._get('{}/{}/inventory'.format(self.URL_HOST, node['name']))
+ self.assertStatus(200)
+ self._validate_inventory(node, resp)
+
+ def test_inventory_list(self):
+        # get the inventory of all hosts
+ data = self._get('{}/inventory'.format(self.URL_UI_HOST))
+ self.assertStatus(200)
+
+ def sorting_key(node):
+ return node['name']
+
+ test_inventory = sorted(self.test_data_inventory, key=sorting_key)
+ resp_inventory = sorted(data, key=sorting_key)
+ self.assertEqual(len(test_inventory), len(resp_inventory))
+ for test, resp in zip(test_inventory, resp_inventory):
+ self._validate_inventory(test, resp)
+
+
+class HostControllerNoOrchestratorTest(DashboardTestCase):
+ def test_host_create(self):
+ self._post('/api/host?hostname=foo', {'status': ''}, version='0.1')
+ self.assertStatus(503)
+ self.assertError(code='orchestrator_status_unavailable',
+ component='orchestrator')
+
+ def test_host_delete(self):
+ self._delete('/api/host/bar')
+ self.assertStatus(503)
+ self.assertError(code='orchestrator_status_unavailable',
+ component='orchestrator')
diff --git a/qa/tasks/mgr/dashboard/test_logs.py b/qa/tasks/mgr/dashboard/test_logs.py
new file mode 100644
index 000000000..63f6e16ed
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_logs.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JList, JObj, addrvec_schema
+
+
+class LogsTest(DashboardTestCase):
+ CEPHFS = True
+
+ def test_logs(self):
+ data = self._get("/api/logs/all")
+ self.assertStatus(200)
+ log_entry_schema = JList(JObj({
+ 'addrs': JObj({
+ 'addrvec': addrvec_schema
+ }),
+ 'channel': str,
+ 'message': str,
+ 'name': str,
+ 'priority': str,
+ 'rank': str,
+ 'seq': int,
+ 'stamp': str
+ }))
+ schema = JObj({
+ 'audit_log': log_entry_schema,
+ 'clog': log_entry_schema
+ })
+ self.assertSchema(data, schema)
+
+ @DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
+ def test_log_perms(self):
+ self._get("/api/logs/all")
+ self.assertStatus(403)
diff --git a/qa/tasks/mgr/dashboard/test_mgr_module.py b/qa/tasks/mgr/dashboard/test_mgr_module.py
new file mode 100644
index 000000000..c196c7124
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_mgr_module.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import logging
+
+import requests
+
+from .helper import (DashboardTestCase, JLeaf, JList, JObj,
+ module_options_object_schema, module_options_schema)
+
+logger = logging.getLogger(__name__)
+
+
+class MgrModuleTestCase(DashboardTestCase):
+ MGRS_REQUIRED = 1
+
+ def wait_until_rest_api_accessible(self):
+ """
+ Wait until the REST API is accessible.
+ """
+
+ def _check_connection():
+ try:
+ # Try reaching an API endpoint successfully.
+ self._get('/api/mgr/module')
+ if self._resp.status_code == 200:
+ return True
+ except requests.ConnectionError:
+ pass
+ return False
+
+ self.wait_until_true(_check_connection, timeout=30)
+
+
+class MgrModuleTest(MgrModuleTestCase):
+
+ def test_list_disabled_module(self):
+ self._ceph_cmd(['mgr', 'module', 'disable', 'iostat'])
+ self.wait_until_rest_api_accessible()
+ data = self._get('/api/mgr/module')
+ self.assertStatus(200)
+ self.assertSchema(
+ data,
+ JList(
+ JObj(sub_elems={
+ 'name': JLeaf(str),
+ 'enabled': JLeaf(bool),
+ 'always_on': JLeaf(bool),
+ 'options': module_options_schema
+ })))
+ module_info = self.find_object_in_list('name', 'iostat', data)
+ self.assertIsNotNone(module_info)
+ self.assertFalse(module_info['enabled'])
+
+ def test_list_enabled_module(self):
+ self._ceph_cmd(['mgr', 'module', 'enable', 'iostat'])
+ self.wait_until_rest_api_accessible()
+ data = self._get('/api/mgr/module')
+ self.assertStatus(200)
+ self.assertSchema(
+ data,
+ JList(
+ JObj(sub_elems={
+ 'name': JLeaf(str),
+ 'enabled': JLeaf(bool),
+ 'always_on': JLeaf(bool),
+ 'options': module_options_schema
+ })))
+ module_info = self.find_object_in_list('name', 'iostat', data)
+ self.assertIsNotNone(module_info)
+ self.assertTrue(module_info['enabled'])
+
+ def test_get(self):
+ data = self._get('/api/mgr/module/telemetry')
+ self.assertStatus(200)
+ self.assertSchema(
+ data,
+ JObj(
+ allow_unknown=True,
+ sub_elems={
+ 'channel_basic': bool,
+ 'channel_ident': bool,
+ 'channel_crash': bool,
+ 'channel_device': bool,
+ 'channel_perf': bool,
+ 'contact': str,
+ 'description': str,
+ 'enabled': bool,
+ 'interval': int,
+ 'last_opt_revision': int,
+ 'leaderboard': bool,
+ 'leaderboard_description': str,
+ 'organization': str,
+ 'proxy': str,
+ 'url': str
+ }))
+
+ def test_module_options(self):
+ data = self._get('/api/mgr/module/telemetry/options')
+ self.assertStatus(200)
+ schema = JObj({
+ 'channel_basic': module_options_object_schema,
+ 'channel_crash': module_options_object_schema,
+ 'channel_device': module_options_object_schema,
+ 'channel_ident': module_options_object_schema,
+ 'channel_perf': module_options_object_schema,
+ 'contact': module_options_object_schema,
+ 'description': module_options_object_schema,
+ 'device_url': module_options_object_schema,
+ 'enabled': module_options_object_schema,
+ 'interval': module_options_object_schema,
+ 'last_opt_revision': module_options_object_schema,
+ 'leaderboard': module_options_object_schema,
+ 'leaderboard_description': module_options_object_schema,
+ 'log_level': module_options_object_schema,
+ 'log_to_cluster': module_options_object_schema,
+ 'log_to_cluster_level': module_options_object_schema,
+ 'log_to_file': module_options_object_schema,
+ 'organization': module_options_object_schema,
+ 'proxy': module_options_object_schema,
+ 'url': module_options_object_schema
+ })
+ self.assertSchema(data, schema)
+
+ def test_module_enable(self):
+ self._post('/api/mgr/module/telemetry/enable')
+ self.assertStatus(200)
+
+ def test_disable(self):
+ self._post('/api/mgr/module/iostat/disable')
+ self.assertStatus(200)
+
+ def test_put(self):
+ self.set_config_key('config/mgr/mgr/iostat/log_level', 'critical')
+ self.set_config_key('config/mgr/mgr/iostat/log_to_cluster', 'False')
+ self.set_config_key('config/mgr/mgr/iostat/log_to_cluster_level', 'info')
+ self.set_config_key('config/mgr/mgr/iostat/log_to_file', 'True')
+ self._put(
+ '/api/mgr/module/iostat',
+ data={
+ 'config': {
+ 'log_level': 'debug',
+ 'log_to_cluster': True,
+ 'log_to_cluster_level': 'warning',
+ 'log_to_file': False
+ }
+ })
+ self.assertStatus(200)
+ data = self._get('/api/mgr/module/iostat')
+ self.assertStatus(200)
+ self.assertEqual(data['log_level'], 'debug')
+ self.assertTrue(data['log_to_cluster'])
+ self.assertEqual(data['log_to_cluster_level'], 'warning')
+ self.assertFalse(data['log_to_file'])
diff --git a/qa/tasks/mgr/dashboard/test_monitor.py b/qa/tasks/mgr/dashboard/test_monitor.py
new file mode 100644
index 000000000..e32c2c10c
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_monitor.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class MonitorTest(DashboardTestCase):
+ AUTH_ROLES = ['cluster-manager']
+
+ @DashboardTestCase.RunAs('test', 'test', ['block-manager'])
+ def test_access_permissions(self):
+ self._get('/api/monitor')
+ self.assertStatus(403)
+
+ def test_monitor_default(self):
+ data = self._get("/api/monitor")
+ self.assertStatus(200)
+
+ self.assertIn('mon_status', data)
+ self.assertIn('in_quorum', data)
+ self.assertIn('out_quorum', data)
+ self.assertIsNotNone(data['mon_status'])
+ self.assertIsNotNone(data['in_quorum'])
+ self.assertIsNotNone(data['out_quorum'])
diff --git a/qa/tasks/mgr/dashboard/test_motd.py b/qa/tasks/mgr/dashboard/test_motd.py
new file mode 100644
index 000000000..2edbf36ba
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_motd.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-public-methods
+
+from __future__ import absolute_import
+
+import time
+
+from .helper import DashboardTestCase
+
+
+class MotdTest(DashboardTestCase):
+ @classmethod
+ def tearDownClass(cls):
+ cls._ceph_cmd(['dashboard', 'motd', 'clear'])
+ super(MotdTest, cls).tearDownClass()
+
+ def setUp(self):
+ super(MotdTest, self).setUp()
+ self._ceph_cmd(['dashboard', 'motd', 'clear'])
+
+ def test_none(self):
+ data = self._get('/ui-api/motd')
+ self.assertStatus(200)
+ self.assertIsNone(data)
+
+ def test_set(self):
+ self._ceph_cmd(['dashboard', 'motd', 'set', 'info', '0', 'foo bar baz'])
+ data = self._get('/ui-api/motd')
+ self.assertStatus(200)
+ self.assertIsInstance(data, dict)
+
+ def test_expired(self):
+ self._ceph_cmd(['dashboard', 'motd', 'set', 'info', '2s', 'foo bar baz'])
+ time.sleep(5)
+ data = self._get('/ui-api/motd')
+ self.assertStatus(200)
+ self.assertIsNone(data)
diff --git a/qa/tasks/mgr/dashboard/test_orchestrator.py b/qa/tasks/mgr/dashboard/test_orchestrator.py
new file mode 100644
index 000000000..2a804c4c2
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_orchestrator.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class OrchestratorControllerTest(DashboardTestCase):
+
+ AUTH_ROLES = ['cluster-manager']
+
+ URL_STATUS = '/ui-api/orchestrator/status'
+
+ ORCHESTRATOR = True
+
+ @classmethod
+ def setUpClass(cls):
+ super(OrchestratorControllerTest, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ cmd = ['test_orchestrator', 'load_data', '-i', '-']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd, stdin='{}')
+
+ def test_status_get(self):
+ data = self._get(self.URL_STATUS)
+ self.assertStatus(200)
+ self.assertTrue(data['available'])
diff --git a/qa/tasks/mgr/dashboard/test_osd.py b/qa/tasks/mgr/dashboard/test_osd.py
new file mode 100644
index 000000000..71cf3d871
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_osd.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import json
+
+from .helper import (DashboardTestCase, JAny, JLeaf, JList, JObj, JTuple,
+ devices_schema)
+
+
+class OsdTest(DashboardTestCase):
+
+ AUTH_ROLES = ['cluster-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ super(OsdTest, cls).setUpClass()
+ cls._load_module('test_orchestrator')
+ cmd = ['orch', 'set', 'backend', 'test_orchestrator']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
+
+ def tearDown(self):
+ self._put('/api/osd/0/mark', data={'action': 'in'})
+
+ @DashboardTestCase.RunAs('test', 'test', ['block-manager'])
+ def test_access_permissions(self):
+ self._get('/api/osd')
+ self.assertStatus(403)
+ self._get('/api/osd/0')
+ self.assertStatus(403)
+
+ def assert_in_and_not_none(self, data, properties):
+ self.assertSchema(data, JObj({p: JAny(none=False) for p in properties}, allow_unknown=True))
+
+ def test_list(self):
+ data = self._get('/api/osd')
+ self.assertStatus(200)
+
+ self.assertGreaterEqual(len(data), 1)
+ data = data[0]
+ self.assert_in_and_not_none(data, ['host', 'tree', 'state', 'stats', 'stats_history'])
+ self.assert_in_and_not_none(data['host'], ['name'])
+ self.assert_in_and_not_none(data['tree'], ['id'])
+ self.assert_in_and_not_none(data['stats'], ['numpg', 'stat_bytes_used', 'stat_bytes',
+ 'op_r', 'op_w'])
+ self.assert_in_and_not_none(data['stats_history'], ['op_out_bytes', 'op_in_bytes'])
+ self.assertSchema(data['stats_history']['op_out_bytes'],
+ JList(JTuple([JLeaf(float), JLeaf(float)])))
+
+ def test_details(self):
+ data = self._get('/api/osd/0')
+ self.assertStatus(200)
+ self.assert_in_and_not_none(data, ['osd_metadata'])
+
+ def test_histogram(self):
+ data = self._get('/api/osd/0/histogram')
+ self.assertStatus(200)
+ self.assert_in_and_not_none(data['osd'], ['op_w_latency_in_bytes_histogram',
+ 'op_r_latency_out_bytes_histogram'])
+
+ def test_scrub(self):
+ self._post('/api/osd/0/scrub?deep=False')
+ self.assertStatus(200)
+
+ self._post('/api/osd/0/scrub?deep=True')
+ self.assertStatus(200)
+
+ def test_safe_to_delete(self):
+ data = self._get('/api/osd/safe_to_delete?svc_ids=0')
+ self.assertStatus(200)
+ self.assertSchema(data, JObj({
+ 'is_safe_to_delete': JAny(none=True),
+ 'message': str
+ }))
+ self.assertTrue(data['is_safe_to_delete'])
+
+ def test_osd_smart(self):
+ self._get('/api/osd/0/smart')
+ self.assertStatus(200)
+
+ def test_mark_out_and_in(self):
+ self._put('/api/osd/0/mark', data={'action': 'out'})
+ self.assertStatus(200)
+
+ self._put('/api/osd/0/mark', data={'action': 'in'})
+ self.assertStatus(200)
+
+ def test_mark_down(self):
+ self._put('/api/osd/0/mark', data={'action': 'down'})
+ self.assertStatus(200)
+
+ def test_reweight(self):
+ self._post('/api/osd/0/reweight', {'weight': 0.4})
+ self.assertStatus(200)
+
+ def get_reweight_value():
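+            # Read the current reweight value from the OSD details, rounded to one decimal place.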
+ self._get('/api/osd/0')
+ response = self.jsonBody()
+ if 'osd_map' in response and 'weight' in response['osd_map']:
+ return round(response['osd_map']['weight'], 1)
+ return None
+ self.wait_until_equal(get_reweight_value, 0.4, 10)
+ self.assertStatus(200)
+
+ # Undo
+ self._post('/api/osd/0/reweight', {'weight': 1})
+
+ def test_create_lost_destroy_remove(self):
+ sample_data = {
+ 'uuid': 'f860ca2e-757d-48ce-b74a-87052cad563f',
+ 'svc_id': 5
+ }
+
+ # Create
+ self._task_post('/api/osd', {
+ 'method': 'bare',
+ 'data': sample_data,
+ 'tracking_id': 'bare-5'
+ })
+ self.assertStatus(201)
+
+        # Invalid method
+ self._task_post('/api/osd', {
+ 'method': 'xyz',
+ 'data': {
+ 'uuid': 'f860ca2e-757d-48ce-b74a-87052cad563f',
+ 'svc_id': 5
+ },
+ 'tracking_id': 'bare-5'
+ })
+ self.assertStatus(400)
+
+ # Lost
+ self._put('/api/osd/5/mark', data={'action': 'lost'})
+ self.assertStatus(200)
+ # Destroy
+ self._post('/api/osd/5/destroy')
+ self.assertStatus(200)
+ # Purge
+ self._post('/api/osd/5/purge')
+ self.assertStatus(200)
+
+ def test_create_with_drive_group(self):
+ data = {
+ 'method': 'drive_groups',
+ 'data': [
+ {
+ 'service_type': 'osd',
+ 'service_id': 'test',
+ 'host_pattern': '*',
+ 'data_devices': {
+ 'vendor': 'abc',
+ 'model': 'cba',
+ 'rotational': True,
+ 'size': '4 TB'
+ },
+ 'wal_devices': {
+ 'vendor': 'def',
+ 'model': 'fed',
+ 'rotational': False,
+ 'size': '1 TB'
+ },
+ 'db_devices': {
+ 'vendor': 'ghi',
+ 'model': 'ihg',
+ 'rotational': False,
+ 'size': '512 GB'
+ },
+ 'wal_slots': 5,
+ 'db_slots': 5,
+ 'encrypted': True
+ }
+ ],
+ 'tracking_id': 'test'
+ }
+ self._post('/api/osd', data)
+ self.assertStatus(201)
+
+ def test_safe_to_destroy(self):
+ osd_dump = json.loads(self._ceph_cmd(['osd', 'dump', '-f', 'json']))
+ max_id = max(map(lambda e: e['osd'], osd_dump['osds']))
+
+ def get_pg_status_equal_unknown(osd_ids):
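+            # True while the safe-to-destroy endpoint still reports PGs in an unknown state
+            # for the given OSD ids.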
+ self._get('/api/osd/safe_to_destroy?ids={}'.format(osd_ids))
+ if 'message' in self.jsonBody():
+ return 'pgs have unknown state' in self.jsonBody()['message']
+ return False
+
+ # 1 OSD safe to destroy
+ unused_osd_id = max_id + 10
+ self.wait_until_equal(
+ lambda: get_pg_status_equal_unknown(unused_osd_id), False, 30)
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'is_safe_to_destroy': True,
+ 'active': [],
+ 'missing_stats': [],
+ 'safe_to_destroy': [unused_osd_id],
+ 'stored_pgs': [],
+ })
+
+ # multiple OSDs safe to destroy
+ unused_osd_ids = [max_id + 11, max_id + 12]
+ self.wait_until_equal(
+ lambda: get_pg_status_equal_unknown(str(unused_osd_ids)), False, 30)
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'is_safe_to_destroy': True,
+ 'active': [],
+ 'missing_stats': [],
+ 'safe_to_destroy': unused_osd_ids,
+ 'stored_pgs': [],
+ })
+
+ # 1 OSD unsafe to destroy
+ def get_destroy_status():
+ self._get('/api/osd/safe_to_destroy?ids=0')
+ if 'is_safe_to_destroy' in self.jsonBody():
+ return self.jsonBody()['is_safe_to_destroy']
+ return None
+ self.wait_until_equal(get_destroy_status, False, 10)
+ self.assertStatus(200)
+
+ def test_osd_devices(self):
+ data = self._get('/api/osd/0/devices')
+ self.assertStatus(200)
+ self.assertSchema(data, devices_schema)
+
+
+class OsdFlagsTest(DashboardTestCase):
+ def __init__(self, *args, **kwargs):
+ super(OsdFlagsTest, self).__init__(*args, **kwargs)
+ self._initial_flags = ['sortbitwise', 'recovery_deletes', 'purged_snapdirs',
+ 'pglog_hardlimit'] # These flags cannot be unset
+
+ @classmethod
+ def _put_flags(cls, flags, ids=None):
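+        # With OSD ids the per-OSD endpoint is used; without ids the cluster-wide flags
+        # are replaced.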
+ url = '/api/osd/flags'
+ data = {'flags': flags}
+
+ if ids:
+ url = url + '/individual'
+ data['ids'] = ids
+
+ cls._put(url, data=data)
+ return cls._resp.json()
+
+ def test_list_osd_flags(self):
+ flags = self._get('/api/osd/flags')
+ self.assertStatus(200)
+ self.assertEqual(len(flags), 4)
+ self.assertCountEqual(flags, self._initial_flags)
+
+ def test_add_osd_flag(self):
+ flags = self._put_flags([
+ 'sortbitwise', 'recovery_deletes', 'purged_snapdirs', 'noout',
+ 'pause', 'pglog_hardlimit'
+ ])
+ self.assertCountEqual(flags, [
+ 'sortbitwise', 'recovery_deletes', 'purged_snapdirs', 'noout',
+ 'pause', 'pglog_hardlimit'
+ ])
+
+ # Restore flags
+ self._put_flags(self._initial_flags)
+
+ def test_get_indiv_flag(self):
+ initial = self._get('/api/osd/flags/individual')
+ self.assertStatus(200)
+ self.assertSchema(initial, JList(JObj({
+ 'osd': int,
+ 'flags': JList(str)
+ })))
+
+ self._ceph_cmd(['osd', 'set-group', 'noout,noin', 'osd.0', 'osd.1', 'osd.2'])
+ flags_added = self._get('/api/osd/flags/individual')
+ self.assertStatus(200)
+ for osd in flags_added:
+ if osd['osd'] in [0, 1, 2]:
+ self.assertIn('noout', osd['flags'])
+ self.assertIn('noin', osd['flags'])
+ for osd_initial in initial:
+ if osd['osd'] == osd_initial['osd']:
+ self.assertGreater(len(osd['flags']), len(osd_initial['flags']))
+
+ self._ceph_cmd(['osd', 'unset-group', 'noout,noin', 'osd.0', 'osd.1', 'osd.2'])
+ flags_removed = self._get('/api/osd/flags/individual')
+ self.assertStatus(200)
+ for osd in flags_removed:
+ if osd['osd'] in [0, 1, 2]:
+ self.assertNotIn('noout', osd['flags'])
+ self.assertNotIn('noin', osd['flags'])
+
+ def test_add_indiv_flag(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': None, 'noout': True}
+ svc_id = 0
+
+ resp = self._put_flags(flags_update, [svc_id])
+ self._check_indiv_flags_resp(resp, [svc_id], ['noout'], [], ['noup', 'nodown', 'noin'])
+ self._check_indiv_flags_osd([svc_id], ['noout'], ['noup', 'nodown', 'noin'])
+
+ self._ceph_cmd(['osd', 'unset-group', 'noout', 'osd.{}'.format(svc_id)])
+
+ def test_add_multiple_indiv_flags(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': True, 'noout': True}
+ svc_id = 0
+
+ resp = self._put_flags(flags_update, [svc_id])
+ self._check_indiv_flags_resp(resp, [svc_id], ['noout', 'noin'], [], ['noup', 'nodown'])
+ self._check_indiv_flags_osd([svc_id], ['noout', 'noin'], ['noup', 'nodown'])
+
+ self._ceph_cmd(['osd', 'unset-group', 'noout,noin', 'osd.{}'.format(svc_id)])
+
+ def test_add_multiple_indiv_flags_multiple_osds(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': True, 'noout': True}
+ svc_id = [0, 1, 2]
+
+ resp = self._put_flags(flags_update, svc_id)
+ self._check_indiv_flags_resp(resp, svc_id, ['noout', 'noin'], [], ['noup', 'nodown'])
+        self._check_indiv_flags_osd(svc_id, ['noout', 'noin'], ['noup', 'nodown'])
+
+ self._ceph_cmd(['osd', 'unset-group', 'noout,noin', 'osd.0', 'osd.1', 'osd.2'])
+
+ def test_remove_indiv_flag(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': None, 'noout': False}
+ svc_id = 0
+ self._ceph_cmd(['osd', 'set-group', 'noout', 'osd.{}'.format(svc_id)])
+
+ resp = self._put_flags(flags_update, [svc_id])
+ self._check_indiv_flags_resp(resp, [svc_id], [], ['noout'], ['noup', 'nodown', 'noin'])
+ self._check_indiv_flags_osd([svc_id], [], ['noup', 'nodown', 'noin', 'noout'])
+
+ def test_remove_multiple_indiv_flags(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': False, 'noout': False}
+ svc_id = 0
+ self._ceph_cmd(['osd', 'set-group', 'noout,noin', 'osd.{}'.format(svc_id)])
+
+ resp = self._put_flags(flags_update, [svc_id])
+ self._check_indiv_flags_resp(resp, [svc_id], [], ['noout', 'noin'], ['noup', 'nodown'])
+ self._check_indiv_flags_osd([svc_id], [], ['noout', 'noin', 'noup', 'nodown'])
+
+ def test_remove_multiple_indiv_flags_multiple_osds(self):
+ flags_update = {'noup': None, 'nodown': None, 'noin': False, 'noout': False}
+ svc_id = [0, 1, 2]
+ self._ceph_cmd(['osd', 'unset-group', 'noout,noin', 'osd.0', 'osd.1', 'osd.2'])
+
+ resp = self._put_flags(flags_update, svc_id)
+ self._check_indiv_flags_resp(resp, svc_id, [], ['noout', 'noin'], ['noup', 'nodown'])
+        self._check_indiv_flags_osd(svc_id, [], ['noout', 'noin', 'noup', 'nodown'])
+
+ def _check_indiv_flags_resp(self, resp, ids, added, removed, ignored):
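+        # Flags listed as ignored must appear in neither the 'added' nor the 'removed'
+        # part of the response.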
+ self.assertStatus(200)
+ self.assertCountEqual(resp['ids'], ids)
+ self.assertCountEqual(resp['added'], added)
+ self.assertCountEqual(resp['removed'], removed)
+
+ for flag in ignored:
+ self.assertNotIn(flag, resp['added'])
+ self.assertNotIn(flag, resp['removed'])
+
+ def _check_indiv_flags_osd(self, ids, activated_flags, deactivated_flags):
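+        # Cross-check the flags directly against 'ceph osd dump' for the affected OSDs.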
+ osds = json.loads(self._ceph_cmd(['osd', 'dump', '--format=json']))['osds']
+ for osd in osds:
+ if osd['osd'] in ids:
+ for flag in activated_flags:
+ self.assertIn(flag, osd['state'])
+ for flag in deactivated_flags:
+ self.assertNotIn(flag, osd['state'])
diff --git a/qa/tasks/mgr/dashboard/test_perf_counters.py b/qa/tasks/mgr/dashboard/test_perf_counters.py
new file mode 100644
index 000000000..c01368bce
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_perf_counters.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JObj
+
+
+class PerfCountersControllerTest(DashboardTestCase):
+
+ def test_perf_counters_list(self):
+ data = self._get('/api/perf_counters')
+ self.assertStatus(200)
+
+ self.assertIsInstance(data, dict)
+ for mon in self.mons():
+ self.assertIn('mon.{}'.format(mon), data)
+
+ osds = self.ceph_cluster.mon_manager.get_osd_dump()
+ for osd in osds:
+ self.assertIn('osd.{}'.format(osd['osd']), data)
+
+ def _validate_perf(self, srv_id, srv_type, data, allow_empty):
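+        # Verify the service identity and, unless empty counter lists are allowed,
+        # the shape of the first counter.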
+ self.assertIsInstance(data, dict)
+ self.assertEqual(srv_type, data['service']['type'])
+ self.assertEqual(str(srv_id), data['service']['id'])
+ self.assertIsInstance(data['counters'], list)
+ if not allow_empty:
+ self.assertGreater(len(data['counters']), 0)
+ for counter in data['counters'][0:1]:
+ self.assertIsInstance(counter, dict)
+ self.assertIn('description', counter)
+ self.assertIn('name', counter)
+ self.assertIn('unit', counter)
+ self.assertIn('value', counter)
+
+ def test_perf_counters_mon_get(self):
+ mon = self.mons()[0]
+ data = self._get('/api/perf_counters/mon/{}'.format(mon))
+ self.assertStatus(200)
+ self._validate_perf(mon, 'mon', data, allow_empty=False)
+
+ def test_perf_counters_mgr_get(self):
+ mgr = list(self.mgr_cluster.mgr_ids)[0]
+ data = self._get('/api/perf_counters/mgr/{}'.format(mgr))
+ self.assertStatus(200)
+ self._validate_perf(mgr, 'mgr', data, allow_empty=False)
+
+ def test_perf_counters_mds_get(self):
+ for mds in self.mds_cluster.mds_ids:
+ data = self._get('/api/perf_counters/mds/{}'.format(mds))
+ self.assertStatus(200)
+ self._validate_perf(mds, 'mds', data, allow_empty=True)
+
+ def test_perf_counters_osd_get(self):
+ for osd in self.ceph_cluster.mon_manager.get_osd_dump():
+ osd = osd['osd']
+ data = self._get('/api/perf_counters/osd/{}'.format(osd))
+ self.assertStatus(200)
+ self._validate_perf(osd, 'osd', data, allow_empty=False)
+
+ def test_perf_counters_not_found(self):
+ osds = self.ceph_cluster.mon_manager.get_osd_dump()
+ unused_id = int(list(map(lambda o: o['osd'], osds)).pop()) + 1
+
+ self._get('/api/perf_counters/osd/{}'.format(unused_id))
+ self.assertStatus(404)
+ schema = JObj(sub_elems={
+ 'status': str,
+ 'detail': str,
+ }, allow_unknown=True)
+ self.assertEqual(self._resp.json()['detail'], "'osd.{}' not found".format(unused_id))
+ self.assertSchemaBody(schema)
diff --git a/qa/tasks/mgr/dashboard/test_pool.py b/qa/tasks/mgr/dashboard/test_pool.py
new file mode 100644
index 000000000..0699be48c
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_pool.py
@@ -0,0 +1,435 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import logging
+import time
+from contextlib import contextmanager
+
+from .helper import DashboardTestCase, JAny, JList, JObj, JUnion
+
+log = logging.getLogger(__name__)
+
+
+class PoolTest(DashboardTestCase):
+ AUTH_ROLES = ['pool-manager']
+
+ pool_schema = JObj(sub_elems={
+ 'pool_name': str,
+ 'type': str,
+ 'application_metadata': JList(str),
+ 'flags': int,
+ 'flags_names': str,
+ }, allow_unknown=True)
+
+ pool_list_stat_schema = JObj(sub_elems={
+ 'latest': JUnion([int, float]),
+ 'rate': float,
+ 'rates': JList(JAny(none=False)),
+ })
+
+ pool_list_stats_schema = JObj(sub_elems={
+ 'avail_raw': pool_list_stat_schema,
+ 'bytes_used': pool_list_stat_schema,
+ 'max_avail': pool_list_stat_schema,
+ 'percent_used': pool_list_stat_schema,
+ 'rd_bytes': pool_list_stat_schema,
+ 'wr_bytes': pool_list_stat_schema,
+ 'rd': pool_list_stat_schema,
+ 'wr': pool_list_stat_schema,
+ }, allow_unknown=True)
+
+ pool_rbd_conf_schema = JList(JObj(sub_elems={
+ 'name': str,
+ 'value': str,
+ 'source': int
+ }))
+
+ @contextmanager
+ def __yield_pool(self, name=None, data=None, deletion_name=None):
+ """
+        Use either just a name or a full description of a pool to create one.
+        The helper also verifies that the pool was created correctly and deletes it once it
+        has been used.
+
+        :param name: Name of the pool
+        :param data: Full description of the pool
+ :param deletion_name: Only needed if the pool was renamed
+ :return:
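+
+        A minimal usage sketch (the pool name is illustrative; see the update tests below)::
+
+            with self.__yield_pool('some_pool'):
+                ...  # run assertions while the pool exists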
+ """
+ data = self._create_pool(name, data)
+ yield data
+ self._delete_pool(deletion_name or data['pool'])
+
+ def _create_pool(self, name, data):
+ data = data or {
+ 'pool': name,
+ 'pg_num': '32',
+ 'pool_type': 'replicated',
+ 'compression_algorithm': 'snappy',
+ 'compression_mode': 'passive',
+ 'compression_max_blob_size': '131072',
+ 'compression_required_ratio': '0.875',
+ 'application_metadata': ['rbd'],
+ 'configuration': {
+ 'rbd_qos_bps_limit': 1024000,
+ 'rbd_qos_iops_limit': 5000,
+ }
+ }
+ self._task_post('/api/pool/', data)
+ self.assertStatus(201)
+ self._validate_pool_properties(data, self._get_pool(data['pool']))
+ return data
+
+ def _delete_pool(self, name):
+ self._task_delete('/api/pool/' + name)
+ self.assertStatus(204)
+
+ def _validate_pool_properties(self, data, pool, timeout=DashboardTestCase.TIMEOUT_HEALTH_CLEAR):
+ # pylint: disable=too-many-branches
+ for prop, value in data.items():
+ if prop == 'pool_type':
+ self.assertEqual(pool['type'], value)
+ elif prop == 'size':
+ self.assertEqual(pool[prop], int(value),
+ '{}: {} != {}'.format(prop, pool[prop], value))
+ elif prop == 'pg_num':
+ self._check_pg_num(pool['pool_name'], int(value))
+ elif prop == 'application_metadata':
+ self.assertIsInstance(pool[prop], list)
+ self.assertEqual(value, pool[prop])
+ elif prop == 'pool':
+ self.assertEqual(pool['pool_name'], value)
+ elif prop.startswith('compression'):
+ if value is not None:
+ if prop.endswith('size'):
+ value = int(value)
+ elif prop.endswith('ratio'):
+ value = float(value)
+ self.assertEqual(pool['options'][prop], value)
+ else:
+ self.assertEqual(pool['options'], {})
+ elif prop == 'configuration':
+ # configuration cannot really be checked here for two reasons:
+ # 1. The default value cannot be given to this method, which becomes relevant
+ # when resetting a value, because it's not always zero.
+ # 2. The expected `source` cannot be given to this method, and it cannot
+ # reliably be determined (see 1)
+ pass
+ else:
+ self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value))
+
+ self.wait_for_health_clear(timeout)
+
+ def _get_pool(self, pool_name):
+ pool = self._get("/api/pool/" + pool_name)
+ self.assertStatus(200)
+ self.assertSchemaBody(self.pool_schema)
+ return pool
+
+ def _check_pg_num(self, pool_name, pg_num):
+ """
+        If the two properties do not have the same value, the cluster goes into a warning state;
+        this only happens during a PG update on an existing pool. The test that exercises this is
+        currently commented out because our QA systems cannot handle the change. Feel free to run
+        it locally.
+ """
+ self.wait_until_equal(
+ lambda: self._get_pool(pool_name)['pg_placement_num'],
+ expect_val=pg_num,
+ timeout=180
+ )
+
+ pool = self._get_pool(pool_name)
+
+ for prop in ['pg_num', 'pg_placement_num']:
+ self.assertEqual(pool[prop], int(pg_num),
+ '{}: {} != {}'.format(prop, pool[prop], pg_num))
+
+ @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
+ def test_read_access_permissions(self):
+ self._get('/api/pool')
+ self.assertStatus(403)
+ self._get('/api/pool/bla')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
+ def test_create_access_permissions(self):
+ self._task_post('/api/pool/', {})
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
+ def test_delete_access_permissions(self):
+ self._delete('/api/pool/ddd')
+ self.assertStatus(403)
+
+ def test_pool_configuration(self):
+ pool_name = '.mgr'
+ data = self._get('/api/pool/{}/configuration'.format(pool_name))
+ self.assertStatus(200)
+ self.assertSchema(data, JList(JObj({
+ 'name': str,
+ 'value': str,
+ 'source': int
+ })))
+
+ def test_pool_list(self):
+ data = self._get("/api/pool")
+ self.assertStatus(200)
+
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ self.assertEqual(len(cluster_pools), len(data))
+ self.assertSchemaBody(JList(self.pool_schema))
+ for pool in data:
+ self.assertNotIn('pg_status', pool)
+ self.assertNotIn('stats', pool)
+ self.assertIn(pool['pool_name'], cluster_pools)
+
+ def test_pool_list_attrs(self):
+ data = self._get("/api/pool?attrs=type,flags")
+ self.assertStatus(200)
+
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ self.assertEqual(len(cluster_pools), len(data))
+ for pool in data:
+ self.assertIn('pool_name', pool)
+ self.assertIn('type', pool)
+ self.assertIn('flags', pool)
+ self.assertNotIn('flags_names', pool)
+ self.assertNotIn('pg_status', pool)
+ self.assertNotIn('stats', pool)
+ self.assertIn(pool['pool_name'], cluster_pools)
+
+ def test_pool_list_stats(self):
+ data = self._get("/api/pool?stats=true")
+ self.assertStatus(200)
+
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ self.assertEqual(len(cluster_pools), len(data))
+ self.assertSchemaBody(JList(self.pool_schema))
+ for pool in data:
+ self.assertIn('pool_name', pool)
+ self.assertIn('type', pool)
+ self.assertIn('application_metadata', pool)
+ self.assertIn('flags', pool)
+ self.assertIn('pg_status', pool)
+ self.assertSchema(pool['stats'], self.pool_list_stats_schema)
+ self.assertIn('flags_names', pool)
+ self.assertIn(pool['pool_name'], cluster_pools)
+
+ def test_pool_get(self):
+ cluster_pools = self.ceph_cluster.mon_manager.list_pools()
+ pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
+ .format(cluster_pools[0]))
+ self.assertEqual(pool['pool_name'], cluster_pools[0])
+ self.assertIn('type', pool)
+ self.assertIn('flags', pool)
+ self.assertNotIn('pg_status', pool)
+ self.assertSchema(pool['stats'], self.pool_list_stats_schema)
+ self.assertNotIn('flags_names', pool)
+ self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema)
+
+ def test_pool_create_with_two_applications(self):
+ self.__yield_pool(None, {
+ 'pool': 'dashboard_pool1',
+ 'pg_num': '32',
+ 'pool_type': 'replicated',
+ 'application_metadata': ['rbd', 'sth'],
+ })
+
+ def test_pool_create_with_ecp_and_rule(self):
+ self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
+ self._ceph_cmd(
+ ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
+ self.__yield_pool(None, {
+ 'pool': 'dashboard_pool2',
+ 'pg_num': '32',
+ 'pool_type': 'erasure',
+ 'application_metadata': ['rbd'],
+ 'erasure_code_profile': 'ecprofile',
+ 'crush_rule': 'ecrule',
+ })
+ self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
+
+ def test_pool_create_with_compression(self):
+ pool = {
+ 'pool': 'dashboard_pool3',
+ 'pg_num': '32',
+ 'pool_type': 'replicated',
+ 'compression_algorithm': 'zstd',
+ 'compression_mode': 'aggressive',
+ 'compression_max_blob_size': '10000000',
+ 'compression_required_ratio': '0.8',
+ 'application_metadata': ['rbd'],
+ 'configuration': {
+ 'rbd_qos_bps_limit': 2048,
+ 'rbd_qos_iops_limit': None,
+ },
+ }
+ with self.__yield_pool(None, pool):
+ expected_configuration = [{
+ 'name': 'rbd_qos_bps_limit',
+ 'source': 1,
+ 'value': '2048',
+ }, {
+ 'name': 'rbd_qos_iops_limit',
+ 'source': 0,
+ 'value': '0',
+ }]
+ new_pool = self._get_pool(pool['pool'])
+ for conf in expected_configuration:
+ self.assertIn(conf, new_pool['configuration'])
+
+ def test_pool_create_with_quotas(self):
+ pools = [
+ {
+ 'pool_data': {
+ 'pool': 'dashboard_pool_quota1',
+ 'pg_num': '32',
+ 'pool_type': 'replicated',
+ 'application_metadata': ['rbd'],
+ },
+ 'pool_quotas_to_check': {
+ 'quota_max_objects': 0,
+ 'quota_max_bytes': 0,
+ }
+ },
+ {
+ 'pool_data': {
+ 'pool': 'dashboard_pool_quota2',
+ 'pg_num': '32',
+ 'pool_type': 'replicated',
+ 'application_metadata': ['rbd'],
+ 'quota_max_objects': 1024,
+ 'quota_max_bytes': 1000,
+ },
+ 'pool_quotas_to_check': {
+ 'quota_max_objects': 1024,
+ 'quota_max_bytes': 1000,
+ }
+ }
+ ]
+
+ for pool in pools:
+ pool_name = pool['pool_data']['pool']
+ with self.__yield_pool(pool_name, pool['pool_data']):
+ self._validate_pool_properties(pool['pool_quotas_to_check'],
+ self._get_pool(pool_name))
+
+ def test_pool_update_name(self):
+ name = 'pool_update'
+ updated_name = 'pool_updated_name'
+ with self.__yield_pool(name, None, updated_name):
+ props = {'pool': updated_name}
+ self._task_put('/api/pool/{}'.format(name), props)
+ time.sleep(5)
+ self.assertStatus(200)
+ self._validate_pool_properties(props, self._get_pool(updated_name))
+
+ def test_pool_update_metadata(self):
+ pool_name = 'pool_update_metadata'
+ with self.__yield_pool(pool_name):
+ props = {'application_metadata': ['rbd', 'sth']}
+ self._task_put('/api/pool/{}'.format(pool_name), props)
+ self._validate_pool_properties(props, self._get_pool(pool_name),
+ self.TIMEOUT_HEALTH_CLEAR * 2)
+
+ properties = {'application_metadata': ['rgw']}
+ self._task_put('/api/pool/' + pool_name, properties)
+ self._validate_pool_properties(properties, self._get_pool(pool_name),
+ self.TIMEOUT_HEALTH_CLEAR * 2)
+
+ properties = {'application_metadata': ['rbd', 'sth']}
+ self._task_put('/api/pool/' + pool_name, properties)
+ self._validate_pool_properties(properties, self._get_pool(pool_name),
+ self.TIMEOUT_HEALTH_CLEAR * 2)
+
+ properties = {'application_metadata': ['rgw']}
+ self._task_put('/api/pool/' + pool_name, properties)
+ self._validate_pool_properties(properties, self._get_pool(pool_name),
+ self.TIMEOUT_HEALTH_CLEAR * 2)
+
+ def test_pool_update_configuration(self):
+ pool_name = 'pool_update_configuration'
+ with self.__yield_pool(pool_name):
+ configuration = {
+ 'rbd_qos_bps_limit': 1024,
+ 'rbd_qos_iops_limit': None,
+ }
+ expected_configuration = [{
+ 'name': 'rbd_qos_bps_limit',
+ 'source': 1,
+ 'value': '1024',
+ }, {
+ 'name': 'rbd_qos_iops_limit',
+ 'source': 0,
+ 'value': '0',
+ }]
+ self._task_put('/api/pool/' + pool_name, {'configuration': configuration})
+ time.sleep(5)
+ pool_config = self._get_pool(pool_name)['configuration']
+ for conf in expected_configuration:
+ self.assertIn(conf, pool_config)
+
+ def test_pool_update_compression(self):
+ pool_name = 'pool_update_compression'
+ with self.__yield_pool(pool_name):
+ properties = {
+ 'compression_algorithm': 'zstd',
+ 'compression_mode': 'aggressive',
+ 'compression_max_blob_size': '10000000',
+ 'compression_required_ratio': '0.8',
+ }
+ self._task_put('/api/pool/' + pool_name, properties)
+ time.sleep(5)
+ self._validate_pool_properties(properties, self._get_pool(pool_name))
+
+ def test_pool_update_unset_compression(self):
+ pool_name = 'pool_update_unset_compression'
+ with self.__yield_pool(pool_name):
+ self._task_put('/api/pool/' + pool_name, {'compression_mode': 'unset'})
+ time.sleep(5)
+ self._validate_pool_properties({
+ 'compression_algorithm': None,
+ 'compression_mode': None,
+ 'compression_max_blob_size': None,
+ 'compression_required_ratio': None,
+ }, self._get_pool(pool_name))
+
+ def test_pool_update_quotas(self):
+ pool_name = 'pool_update_quotas'
+ with self.__yield_pool(pool_name):
+ properties = {
+ 'quota_max_objects': 1024,
+ 'quota_max_bytes': 1000,
+ }
+ self._task_put('/api/pool/' + pool_name, properties)
+ time.sleep(5)
+ self._validate_pool_properties(properties, self._get_pool(pool_name))
+
+ def test_pool_create_fail(self):
+ data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'}
+ self._task_post('/api/pool/', data)
+ self.assertStatus(400)
+ self.assertJsonBody({
+ 'component': 'pool',
+ 'code': "2",
+ 'detail': "[errno -2] specified rule dnf doesn't exist"
+ })
+
+ def test_pool_info(self):
+ self._get("/ui-api/pool/info")
+ self.assertSchemaBody(JObj({
+ 'pool_names': JList(str),
+ 'compression_algorithms': JList(str),
+ 'compression_modes': JList(str),
+ 'is_all_bluestore': bool,
+ 'bluestore_compression_algorithm': str,
+ 'osd_count': int,
+ 'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
+ 'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
+ 'pg_autoscale_default_mode': str,
+ 'pg_autoscale_modes': JList(str),
+ 'erasure_code_profiles': JList(JObj({}, allow_unknown=True)),
+ 'used_rules': JObj({}, allow_unknown=True),
+ 'used_profiles': JObj({}, allow_unknown=True),
+ 'nodes': JList(JObj({}, allow_unknown=True)),
+ }))
diff --git a/qa/tasks/mgr/dashboard/test_rbd.py b/qa/tasks/mgr/dashboard/test_rbd.py
new file mode 100644
index 000000000..c2ffbd48e
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_rbd.py
@@ -0,0 +1,978 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-public-methods
+
+from __future__ import absolute_import
+
+import time
+
+from .helper import DashboardTestCase, JLeaf, JList, JObj
+
+
+class RbdTest(DashboardTestCase):
+ AUTH_ROLES = ['pool-manager', 'block-manager', 'cluster-manager']
+ LIST_VERSION = '2.0'
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['create', 'update', 'delete']}])
+ def test_read_access_permissions(self):
+ self._get('/api/block/image?offset=0&limit=-1&search=&sort=+name',
+ version=RbdTest.LIST_VERSION)
+ self.assertStatus(403)
+ self.get_image('pool', None, 'image')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'update', 'delete']}])
+ def test_create_access_permissions(self):
+ self.create_image('pool', None, 'name', 0)
+ self.assertStatus(403)
+ self.create_snapshot('pool', None, 'image', 'snapshot', False)
+ self.assertStatus(403)
+ self.copy_image('src_pool', None, 'src_image', 'dest_pool', None, 'dest_image')
+ self.assertStatus(403)
+ self.clone_image('parent_pool', None, 'parent_image', 'parent_snap', 'pool', None, 'name')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'create', 'delete']}])
+ def test_update_access_permissions(self):
+ self.edit_image('pool', None, 'image')
+ self.assertStatus(403)
+ self.update_snapshot('pool', None, 'image', 'snapshot', None, None)
+ self.assertStatus(403)
+ self.rollback_snapshot('rbd', None, 'rollback_img', 'snap1')
+ self.assertStatus(403)
+ self.flatten_image('pool', None, 'image')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'create', 'update']}])
+ def test_delete_access_permissions(self):
+ self.remove_image('pool', None, 'image')
+ self.assertStatus(403)
+ self.remove_snapshot('pool', None, 'image', 'snapshot')
+ self.assertStatus(403)
+
+ @classmethod
+ def create_namespace(cls, pool, namespace):
+ data = {'namespace': namespace}
+ return cls._post('/api/block/pool/{}/namespace'.format(pool), data)
+
+ @classmethod
+ def remove_namespace(cls, pool, namespace):
+ return cls._delete('/api/block/pool/{}/namespace/{}'.format(pool, namespace))
+
+ @classmethod
+ def create_image(cls, pool, namespace, name, size, **kwargs):
+ data = {'name': name, 'pool_name': pool, 'namespace': namespace, 'size': size}
+ data.update(kwargs)
+ return cls._task_post('/api/block/image', data)
+
+ @classmethod
+ def get_image(cls, pool, namespace, name):
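+        # A namespace, when given, becomes part of the image spec
+        # ('<pool>/<namespace>/<image>') with the separator URL-encoded as '%2F'.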
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._get('/api/block/image/{}%2F{}{}'.format(pool, namespace, name))
+
+ @classmethod
+ def clone_image(cls, parent_pool, parent_namespace, parent_image, parent_snap, pool, namespace,
+ name, **kwargs):
+ # pylint: disable=too-many-arguments
+ data = {'child_image_name': name, 'child_namespace': namespace, 'child_pool_name': pool}
+ data.update(kwargs)
+ parent_namespace = '{}%2F'.format(parent_namespace) if parent_namespace else ''
+ return cls._task_post('/api/block/image/{}%2F{}{}/snap/{}/clone'
+ .format(parent_pool, parent_namespace, parent_image, parent_snap),
+ data)
+
+ @classmethod
+ def copy_image(cls, src_pool, src_namespace, src_image, dest_pool, dest_namespace, dest_image,
+ **kwargs):
+ # pylint: disable=too-many-arguments
+ data = {'dest_image_name': dest_image,
+ 'dest_pool_name': dest_pool,
+ 'dest_namespace': dest_namespace}
+ data.update(kwargs)
+ src_namespace = '{}%2F'.format(src_namespace) if src_namespace else ''
+ return cls._task_post('/api/block/image/{}%2F{}{}/copy'
+ .format(src_pool, src_namespace, src_image), data)
+
+ @classmethod
+ def remove_image(cls, pool, namespace, image):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_delete('/api/block/image/{}%2F{}{}'.format(pool, namespace, image))
+
+ # pylint: disable=too-many-arguments
+ @classmethod
+ def edit_image(cls, pool, namespace, image, name=None, size=None, features=None, **kwargs):
+ kwargs.update({'name': name, 'size': size, 'features': features})
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_put('/api/block/image/{}%2F{}{}'.format(pool, namespace, image), kwargs)
+
+ @classmethod
+ def flatten_image(cls, pool, namespace, image):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_post('/api/block/image/{}%2F{}{}/flatten'.format(pool, namespace, image))
+
+ @classmethod
+ def create_snapshot(cls, pool, namespace, image, snapshot, mirrorImageSnapshot):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_post('/api/block/image/{}%2F{}{}/snap'.format(pool, namespace, image),
+ {'snapshot_name': snapshot, 'mirrorImageSnapshot': mirrorImageSnapshot}) # noqa E501 #pylint: disable=line-too-long
+
+ @classmethod
+ def remove_snapshot(cls, pool, namespace, image, snapshot):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_delete('/api/block/image/{}%2F{}{}/snap/{}'.format(pool, namespace, image,
+ snapshot))
+
+ @classmethod
+ def update_snapshot(cls, pool, namespace, image, snapshot, new_name, is_protected):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_put('/api/block/image/{}%2F{}{}/snap/{}'.format(pool, namespace, image,
+ snapshot),
+ {'new_snap_name': new_name, 'is_protected': is_protected})
+
+ @classmethod
+ def rollback_snapshot(cls, pool, namespace, image, snapshot):
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_post('/api/block/image/{}%2F{}{}/snap/{}/rollback'.format(pool,
+ namespace,
+ image,
+ snapshot))
+
+ @classmethod
+ def setUpClass(cls):
+ super(RbdTest, cls).setUpClass()
+ cls.create_pool('rbd', 2**3, 'replicated')
+ cls.create_pool('rbd_iscsi', 2**3, 'replicated')
+
+ cls.create_image('rbd', None, 'img1', 2**30)
+ cls.create_image('rbd', None, 'img2', 2*2**30)
+ cls.create_image('rbd_iscsi', None, 'img1', 2**30)
+ cls.create_image('rbd_iscsi', None, 'img2', 2*2**30)
+
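+        # Creating an RBD image with a separate (erasure-coded) data pool requires
+        # BlueStore, so remember whether every OSD reports the bluestore objectstore.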
+ osd_metadata = cls.ceph_cluster.mon_manager.get_osd_metadata()
+ cls.bluestore_support = True
+ for osd in osd_metadata:
+ if osd['osd_objectstore'] != 'bluestore':
+ cls.bluestore_support = False
+ break
+
+ @classmethod
+ def tearDownClass(cls):
+ super(RbdTest, cls).tearDownClass()
+ cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it'])
+ cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd_iscsi', 'rbd_iscsi',
+ '--yes-i-really-really-mean-it'])
+ cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd_data', 'rbd_data',
+ '--yes-i-really-really-mean-it'])
+
+ def create_image_in_trash(self, pool, name, delay=0):
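+        # Create an image, move it to the trash with the given expiration delay
+        # and return its id so callers can look it up or purge it later.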
+ self.create_image(pool, None, name, 10240)
+ img = self._get('/api/block/image/{}%2F{}'.format(pool, name))
+
+ self._task_post("/api/block/image/{}%2F{}/move_trash".format(pool, name),
+ {'delay': delay})
+ self.assertStatus([200, 201])
+ return img['id']
+
+ @classmethod
+ def remove_trash(cls, pool, image_id, force=False):
+ return cls._task_delete('/api/block/image/trash/{}%2F{}/?force={}'.format(
+ pool, image_id, force))
+
+ @classmethod
+ def restore_trash(cls, pool, namespace, image_id, new_image_name):
+ data = {'new_image_name': new_image_name}
+ namespace = '{}%2F'.format(namespace) if namespace else ''
+ return cls._task_post('/api/block/image/trash/{}%2F{}{}/restore'.format(pool,
+ namespace,
+ image_id), data)
+
+ @classmethod
+ def purge_trash(cls, pool):
+ return cls._task_post('/api/block/image/trash/purge?pool_name={}'.format(pool))
+
+ @classmethod
+ def get_trash(cls, pool, image_id):
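+        # Return the trash entry matching image_id, or None if the image is not
+        # (or no longer) in the pool's trash.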
+ trash = cls._get('/api/block/image/trash/?pool_name={}'.format(pool))
+ if isinstance(trash, list):
+ for trash_pool in trash:
+ for image in trash_pool['value']:
+ if image['id'] == image_id:
+ return image
+
+ return None
+
+ def _validate_image(self, img, **kwargs):
+ """
+ Example of an RBD image json:
+
+ {
+ "size": 1073741824,
+ "obj_size": 4194304,
+ "mirror_mode": "journal",
+ "num_objs": 256,
+ "order": 22,
+ "block_name_prefix": "rbd_data.10ae2ae8944a",
+ "name": "img1",
+ "pool_name": "rbd",
+ "features": 61,
+ "primary": true,
+ "features_name": ["deep-flatten", "exclusive-lock", "fast-diff", "layering",
+ "object-map"]
+ }
+ """
+ schema = JObj(sub_elems={
+ 'size': JLeaf(int),
+ 'obj_size': JLeaf(int),
+ 'num_objs': JLeaf(int),
+ 'order': JLeaf(int),
+ 'block_name_prefix': JLeaf(str),
+ 'name': JLeaf(str),
+ 'id': JLeaf(str),
+ 'unique_id': JLeaf(str),
+ 'image_format': JLeaf(int),
+ 'pool_name': JLeaf(str),
+ 'namespace': JLeaf(str, none=True),
+ 'primary': JLeaf(bool, none=True),
+ 'features': JLeaf(int),
+ 'features_name': JList(JLeaf(str)),
+ 'stripe_count': JLeaf(int, none=True),
+ 'stripe_unit': JLeaf(int, none=True),
+ 'parent': JObj(sub_elems={'pool_name': JLeaf(str),
+ 'pool_namespace': JLeaf(str, none=True),
+ 'image_name': JLeaf(str),
+ 'snap_name': JLeaf(str)}, none=True),
+ 'data_pool': JLeaf(str, none=True),
+ 'snapshots': JList(JLeaf(dict)),
+ 'timestamp': JLeaf(str, none=True),
+ 'disk_usage': JLeaf(int, none=True),
+ 'total_disk_usage': JLeaf(int, none=True),
+ 'configuration': JList(JObj(sub_elems={
+ 'name': JLeaf(str),
+ 'source': JLeaf(int),
+ 'value': JLeaf(str),
+ })),
+ 'metadata': JObj({}, allow_unknown=True),
+ 'mirror_mode': JLeaf(str),
+ })
+ self.assertSchema(img, schema)
+
+ for k, v in kwargs.items():
+ if isinstance(v, list):
+ self.assertSetEqual(set(img[k]), set(v))
+ else:
+ self.assertEqual(img[k], v)
+
+ def _validate_snapshot(self, snap, **kwargs):
+ self.assertIn('id', snap)
+ self.assertIn('name', snap)
+ self.assertIn('is_protected', snap)
+ self.assertIn('timestamp', snap)
+ self.assertIn('size', snap)
+ self.assertIn('children', snap)
+
+ for k, v in kwargs.items():
+ if isinstance(v, list):
+ self.assertSetEqual(set(snap[k]), set(v))
+ else:
+ self.assertEqual(snap[k], v)
+
+ def _validate_snapshot_list(self, snap_list, snap_name=None, **kwargs):
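+        # Ensure snap_name, if given, is present in snap_list and matches the
+        # expected snapshot attributes.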
+ found = False
+ for snap in snap_list:
+ self.assertIn('name', snap)
+ if snap_name and snap['name'] == snap_name:
+ found = True
+ self._validate_snapshot(snap, **kwargs)
+ break
+ if snap_name and not found:
+ self.fail("Snapshot {} not found".format(snap_name))
+
+ def test_list(self):
+ data = self._get('/api/block/image?offset=0&limit=-1&search=&sort=+name',
+ version=RbdTest.LIST_VERSION)
+ self.assertStatus(200)
+ self.assertEqual(len(data), 2)
+
+ for pool_view in data:
+ self.assertIsNotNone(pool_view['value'])
+ self.assertIn('pool_name', pool_view)
+ self.assertIn(pool_view['pool_name'], ['rbd', 'rbd_iscsi'])
+ image_list = pool_view['value']
+ self.assertEqual(len(image_list), 2)
+
+ for img in image_list:
+ self.assertIn('name', img)
+ self.assertIn('pool_name', img)
+ self.assertIn(img['pool_name'], ['rbd', 'rbd_iscsi'])
+ if img['name'] == 'img1':
+ self._validate_image(img, size=1073741824,
+ num_objs=256, obj_size=4194304,
+ features_name=['deep-flatten',
+ 'exclusive-lock',
+ 'fast-diff',
+ 'layering',
+ 'object-map'])
+ elif img['name'] == 'img2':
+ self._validate_image(img, size=2147483648,
+ num_objs=512, obj_size=4194304,
+ features_name=['deep-flatten',
+ 'exclusive-lock',
+ 'fast-diff',
+ 'layering',
+ 'object-map'])
+ else:
+                    assert False, "Unexpected image '{}' in result list".format(img['name'])
+
+ def test_create(self):
+ rbd_name = 'test_rbd'
+ self.create_image('rbd', None, rbd_name, 10240)
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', None, 'test_rbd')
+ self.assertStatus(200)
+
+ self._validate_image(img, name=rbd_name, size=10240,
+ num_objs=1, obj_size=4194304,
+ features_name=['deep-flatten',
+ 'exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+
+ self.remove_image('rbd', None, rbd_name)
+
+ def test_create_with_configuration(self):
+ pool = 'rbd'
+ image_name = 'image_with_config'
+ size = 10240
+ configuration = {
+ 'rbd_qos_bps_limit': 10240,
+ 'rbd_qos_bps_burst': 10240 * 2,
+ }
+ expected = [{
+ 'name': 'rbd_qos_bps_limit',
+ 'source': 2,
+ 'value': str(10240),
+ }, {
+ 'name': 'rbd_qos_bps_burst',
+ 'source': 2,
+ 'value': str(10240 * 2),
+ }]
+
+ self.create_image(pool, None, image_name, size, configuration=configuration)
+ self.assertStatus(201)
+ img = self.get_image('rbd', None, image_name)
+ self.assertStatus(200)
+ for conf in expected:
+ self.assertIn(conf, img['configuration'])
+
+ self.remove_image(pool, None, image_name)
+
+ def test_create_with_metadata(self):
+ pool = 'rbd'
+ image_name = 'image_with_meta'
+ size = 10240
+ metadata = {
+ 'test1': 'test',
+ 'test2': 'value',
+ }
+
+ self.create_image(pool, None, image_name, size, metadata=metadata)
+ self.assertStatus(201)
+ img = self.get_image('rbd', None, image_name)
+ self.assertStatus(200)
+ self.assertEqual(len(metadata), len(img['metadata']))
+ for meta in metadata:
+ self.assertIn(meta, img['metadata'])
+
+ self.remove_image(pool, None, image_name)
+
+ def test_create_rbd_in_data_pool(self):
+ if not self.bluestore_support:
+ self.skipTest('requires bluestore cluster')
+
+ self.create_pool('data_pool', 2**4, 'erasure')
+
+ rbd_name = 'test_rbd_in_data_pool'
+ self.create_image('rbd', None, rbd_name, 10240, data_pool='data_pool')
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', None, 'test_rbd_in_data_pool')
+ self.assertStatus(200)
+
+ self._validate_image(img, name=rbd_name, size=10240,
+ num_objs=1, obj_size=4194304,
+ data_pool='data_pool',
+ features_name=['data-pool', 'deep-flatten',
+ 'exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+
+ self.remove_image('rbd', None, rbd_name)
+ self.assertStatus(204)
+ self._ceph_cmd(['osd', 'pool', 'delete', 'data_pool', 'data_pool',
+ '--yes-i-really-really-mean-it'])
+
+ def test_create_rbd_twice(self):
+ res = self.create_image('rbd', None, 'test_rbd_twice', 10240)
+
+ res = self.create_image('rbd', None, 'test_rbd_twice', 10240)
+ self.assertStatus(400)
+ self.assertEqual(res, {"code": '17', 'status': 400, "component": "rbd",
+ "detail": "[errno 17] RBD image already exists (error creating "
+ "image)",
+ 'task': {'name': 'rbd/create',
+ 'metadata': {'pool_name': 'rbd', 'namespace': None,
+ 'image_name': 'test_rbd_twice'}}})
+ self.remove_image('rbd', None, 'test_rbd_twice')
+ self.assertStatus(204)
+
+ def test_snapshots_and_clone_info(self):
+ self.create_snapshot('rbd', None, 'img1', 'snap1', False)
+ self.create_snapshot('rbd', None, 'img1', 'snap2', False)
+ self._rbd_cmd(['snap', 'protect', 'rbd/img1@snap1'])
+ self._rbd_cmd(['clone', 'rbd/img1@snap1', 'rbd_iscsi/img1_clone'])
+
+ img = self.get_image('rbd', None, 'img1')
+ self.assertStatus(200)
+ self._validate_image(img, name='img1', size=1073741824,
+ num_objs=256, obj_size=4194304, parent=None,
+ features_name=['deep-flatten', 'exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+ for snap in img['snapshots']:
+ if snap['name'] == 'snap1':
+ self._validate_snapshot(snap, is_protected=True)
+ self.assertEqual(len(snap['children']), 1)
+ self.assertDictEqual(snap['children'][0],
+ {'pool_name': 'rbd_iscsi',
+ 'image_name': 'img1_clone'})
+ elif snap['name'] == 'snap2':
+ self._validate_snapshot(snap, is_protected=False)
+
+ img = self.get_image('rbd_iscsi', None, 'img1_clone')
+ self.assertStatus(200)
+ self._validate_image(img, name='img1_clone', size=1073741824,
+ num_objs=256, obj_size=4194304,
+ parent={'pool_name': 'rbd', 'pool_namespace': '',
+ 'image_name': 'img1', 'snap_name': 'snap1'},
+ features_name=['deep-flatten', 'exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+ self.remove_image('rbd_iscsi', None, 'img1_clone')
+ self.assertStatus(204)
+
+ def test_disk_usage(self):
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '50M', 'rbd/img2'])
+ self.create_snapshot('rbd', None, 'img2', 'snap1', False)
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '20M', 'rbd/img2'])
+ self.create_snapshot('rbd', None, 'img2', 'snap2', False)
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '10M', 'rbd/img2'])
+ self.create_snapshot('rbd', None, 'img2', 'snap3', False)
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M', 'rbd/img2'])
+ img = self.get_image('rbd', None, 'img2')
+ self.assertStatus(200)
+ self._validate_image(img, name='img2', size=2147483648,
+ total_disk_usage=268435456, disk_usage=67108864)
+
+ def test_delete_non_existent_image(self):
+ res = self.remove_image('rbd', None, 'i_dont_exist')
+ self.assertStatus(404)
+ self.assertEqual(res, {u'code': 404, "status": 404, "component": None,
+ "detail": "(404, 'Image not found')",
+ 'task': {'name': 'rbd/delete',
+ 'metadata': {'image_spec': 'rbd/i_dont_exist'}}})
+
+ def test_image_delete(self):
+ self.create_image('rbd', None, 'delete_me', 2**30)
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'delete_me', 'snap1', False)
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'delete_me', 'snap2', False)
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', None, 'delete_me')
+ self.assertStatus(200)
+ self._validate_image(img, name='delete_me', size=2**30)
+ self.assertEqual(len(img['snapshots']), 2)
+
+ self.remove_snapshot('rbd', None, 'delete_me', 'snap1')
+ self.assertStatus(204)
+ self.remove_snapshot('rbd', None, 'delete_me', 'snap2')
+ self.assertStatus(204)
+
+ img = self.get_image('rbd', None, 'delete_me')
+ self.assertStatus(200)
+ self._validate_image(img, name='delete_me', size=2**30)
+ self.assertEqual(len(img['snapshots']), 0)
+
+ self.remove_image('rbd', None, 'delete_me')
+ self.assertStatus(204)
+
+ def test_image_delete_with_snapshot(self):
+ self.create_image('rbd', None, 'delete_me', 2**30)
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'delete_me', 'snap1', False)
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'delete_me', 'snap2', False)
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', None, 'delete_me')
+ self.assertStatus(200)
+ self._validate_image(img, name='delete_me', size=2**30)
+ self.assertEqual(len(img['snapshots']), 2)
+
+ self.remove_image('rbd', None, 'delete_me')
+ self.assertStatus(204)
+
+ def test_image_rename(self):
+ self.create_image('rbd', None, 'edit_img', 2**30)
+ self.assertStatus(201)
+ self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self.edit_image('rbd', None, 'edit_img', 'new_edit_img')
+ self.assertStatus(200)
+ self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(404)
+ self.get_image('rbd', None, 'new_edit_img')
+ self.assertStatus(200)
+ self.remove_image('rbd', None, 'new_edit_img')
+ self.assertStatus(204)
+
+ def test_image_resize(self):
+ self.create_image('rbd', None, 'edit_img', 2**30)
+ self.assertStatus(201)
+ img = self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self._validate_image(img, size=2**30)
+ self.edit_image('rbd', None, 'edit_img', size=2*2**30)
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self._validate_image(img, size=2*2**30)
+ self.remove_image('rbd', None, 'edit_img')
+ self.assertStatus(204)
+
+ def test_image_change_features(self):
+ self.create_image('rbd', None, 'edit_img', 2**30, features=["layering"])
+ self.assertStatus(201)
+ img = self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self._validate_image(img, features_name=["layering"])
+ self.edit_image('rbd', None, 'edit_img',
+ features=["fast-diff", "object-map", "exclusive-lock"])
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self._validate_image(img, features_name=['exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+ self.edit_image('rbd', None, 'edit_img',
+ features=["journaling", "exclusive-lock"])
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'edit_img')
+ self.assertStatus(200)
+ self._validate_image(img, features_name=['exclusive-lock',
+ 'journaling', 'layering'])
+ self.remove_image('rbd', None, 'edit_img')
+ self.assertStatus(204)
+
+ def test_image_change_config(self):
+ pool = 'rbd'
+ image = 'image_with_config'
+ initial_conf = {
+ 'rbd_qos_bps_limit': 10240,
+ 'rbd_qos_write_iops_limit': None
+ }
+ initial_expect = [{
+ 'name': 'rbd_qos_bps_limit',
+ 'source': 2,
+ 'value': '10240',
+ }, {
+ 'name': 'rbd_qos_write_iops_limit',
+ 'source': 0,
+ 'value': '0',
+ }]
+ new_conf = {
+ 'rbd_qos_bps_limit': 0,
+ 'rbd_qos_bps_burst': 20480,
+ 'rbd_qos_write_iops_limit': None
+ }
+ new_expect = [{
+ 'name': 'rbd_qos_bps_limit',
+ 'source': 2,
+ 'value': '0',
+ }, {
+ 'name': 'rbd_qos_bps_burst',
+ 'source': 2,
+ 'value': '20480',
+ }, {
+ 'name': 'rbd_qos_write_iops_limit',
+ 'source': 0,
+ 'value': '0',
+ }]
+
+ self.create_image(pool, None, image, 2**30, configuration=initial_conf)
+ self.assertStatus(201)
+ img = self.get_image(pool, None, image)
+ self.assertStatus(200)
+ for conf in initial_expect:
+ self.assertIn(conf, img['configuration'])
+
+ self.edit_image(pool, None, image, configuration=new_conf)
+ img = self.get_image(pool, None, image)
+ self.assertStatus(200)
+ for conf in new_expect:
+ self.assertIn(conf, img['configuration'])
+
+ self.remove_image(pool, None, image)
+ self.assertStatus(204)
+
+ def test_image_change_meta(self):
+ pool = 'rbd'
+ image = 'image_with_meta'
+ initial_meta = {
+ 'test1': 'test',
+ 'test2': 'value',
+ 'test3': None,
+ }
+ initial_expect = {
+ 'test1': 'test',
+ 'test2': 'value',
+ }
+ new_meta = {
+ 'test1': None,
+ 'test2': 'new_value',
+ 'test3': 'value',
+ 'test4': None,
+ }
+ new_expect = {
+ 'test2': 'new_value',
+ 'test3': 'value',
+ }
+
+ self.create_image(pool, None, image, 2**30, metadata=initial_meta)
+ self.assertStatus(201)
+ img = self.get_image(pool, None, image)
+ self.assertStatus(200)
+ self.assertEqual(len(initial_expect), len(img['metadata']))
+ for meta in initial_expect:
+ self.assertIn(meta, img['metadata'])
+
+ self.edit_image(pool, None, image, metadata=new_meta)
+ img = self.get_image(pool, None, image)
+ self.assertStatus(200)
+ self.assertEqual(len(new_expect), len(img['metadata']))
+ for meta in new_expect:
+ self.assertIn(meta, img['metadata'])
+
+ self.remove_image(pool, None, image)
+ self.assertStatus(204)
+
+ def test_update_snapshot(self):
+ self.create_snapshot('rbd', None, 'img1', 'snap5', False)
+ self.assertStatus(201)
+ img = self.get_image('rbd', None, 'img1')
+ self._validate_snapshot_list(img['snapshots'], 'snap5', is_protected=False)
+
+ self.update_snapshot('rbd', None, 'img1', 'snap5', 'snap6', None)
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'img1')
+ self._validate_snapshot_list(img['snapshots'], 'snap6', is_protected=False)
+
+ self.update_snapshot('rbd', None, 'img1', 'snap6', None, True)
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'img1')
+ self._validate_snapshot_list(img['snapshots'], 'snap6', is_protected=True)
+
+ self.update_snapshot('rbd', None, 'img1', 'snap6', 'snap5', False)
+ self.assertStatus(200)
+ img = self.get_image('rbd', None, 'img1')
+ self._validate_snapshot_list(img['snapshots'], 'snap5', is_protected=False)
+
+ self.remove_snapshot('rbd', None, 'img1', 'snap5')
+ self.assertStatus(204)
+
+ def test_snapshot_rollback(self):
+ self.create_image('rbd', None, 'rollback_img', 2**30,
+ features=["layering", "exclusive-lock", "fast-diff",
+ "object-map"])
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'rollback_img', 'snap1', False)
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', None, 'rollback_img')
+ self.assertStatus(200)
+ self.assertEqual(img['disk_usage'], 0)
+
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M',
+ 'rbd/rollback_img'])
+
+ img = self.get_image('rbd', None, 'rollback_img')
+ self.assertStatus(200)
+ self.assertGreater(img['disk_usage'], 0)
+
+ self.rollback_snapshot('rbd', None, 'rollback_img', 'snap1')
+ self.assertStatus([201, 200])
+
+ img = self.get_image('rbd', None, 'rollback_img')
+ self.assertStatus(200)
+ self.assertEqual(img['disk_usage'], 0)
+
+ self.remove_snapshot('rbd', None, 'rollback_img', 'snap1')
+ self.assertStatus(204)
+ self.remove_image('rbd', None, 'rollback_img')
+ self.assertStatus(204)
+
+ def test_clone(self):
+ self.create_image('rbd', None, 'cimg', 2**30, features=["layering"],
+ metadata={'key1': 'val1'})
+ self.assertStatus(201)
+ self.create_snapshot('rbd', None, 'cimg', 'snap1', False)
+ self.assertStatus(201)
+ self.update_snapshot('rbd', None, 'cimg', 'snap1', None, True)
+ self.assertStatus(200)
+ self.clone_image('rbd', None, 'cimg', 'snap1', 'rbd', None, 'cimg-clone',
+ features=["layering", "exclusive-lock", "fast-diff",
+ "object-map"],
+ metadata={'key1': None, 'key2': 'val2'})
+ self.assertStatus([200, 201])
+
+ img = self.get_image('rbd', None, 'cimg-clone')
+ self.assertStatus(200)
+ self._validate_image(img, features_name=['exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'],
+ parent={'pool_name': 'rbd', 'pool_namespace': '',
+ 'image_name': 'cimg', 'snap_name': 'snap1'},
+ metadata={'key2': 'val2'})
+
+ res = self.remove_image('rbd', None, 'cimg')
+ self.assertStatus(400)
+ self.assertIn('code', res)
+ self.assertEqual(res['code'], '16')
+
+ self.remove_image('rbd', None, 'cimg-clone')
+ self.assertStatus(204)
+ self.remove_image('rbd', None, 'cimg')
+ self.assertStatus(204)
+
+ def test_copy(self):
+ self.create_image('rbd', None, 'coimg', 2**30,
+ features=["layering", "exclusive-lock", "fast-diff",
+ "object-map"],
+ metadata={'key1': 'val1'})
+ self.assertStatus(201)
+
+ self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M',
+ 'rbd/coimg'])
+
+ self.copy_image('rbd', None, 'coimg', 'rbd_iscsi', None, 'coimg-copy',
+ features=["layering", "fast-diff", "exclusive-lock",
+ "object-map"],
+ metadata={'key1': None, 'key2': 'val2'})
+ self.assertStatus([200, 201])
+
+ img = self.get_image('rbd', None, 'coimg')
+ self.assertStatus(200)
+ self._validate_image(img, features_name=['layering', 'exclusive-lock',
+ 'fast-diff', 'object-map'],
+ metadata={'key1': 'val1'})
+
+ img_copy = self.get_image('rbd_iscsi', None, 'coimg-copy')
+ self._validate_image(img_copy, features_name=['exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'],
+ metadata={'key2': 'val2'},
+ disk_usage=img['disk_usage'])
+
+ self.remove_image('rbd', None, 'coimg')
+ self.assertStatus(204)
+ self.remove_image('rbd_iscsi', None, 'coimg-copy')
+ self.assertStatus(204)
+
+ def test_flatten(self):
+ self.create_snapshot('rbd', None, 'img1', 'snapf', False)
+ self.update_snapshot('rbd', None, 'img1', 'snapf', None, True)
+ self.clone_image('rbd', None, 'img1', 'snapf', 'rbd_iscsi', None, 'img1_snapf_clone')
+
+ img = self.get_image('rbd_iscsi', None, 'img1_snapf_clone')
+ self.assertStatus(200)
+ self.assertIsNotNone(img['parent'])
+
+ self.flatten_image('rbd_iscsi', None, 'img1_snapf_clone')
+ self.assertStatus([200, 201])
+
+ img = self.get_image('rbd_iscsi', None, 'img1_snapf_clone')
+ self.assertStatus(200)
+ self.assertIsNone(img['parent'])
+
+ self.update_snapshot('rbd', None, 'img1', 'snapf', None, False)
+ self.remove_snapshot('rbd', None, 'img1', 'snapf')
+ self.assertStatus(204)
+
+ self.remove_image('rbd_iscsi', None, 'img1_snapf_clone')
+ self.assertStatus(204)
+
+ def test_default_features(self):
+ default_features = self._get('/api/block/image/default_features')
+ self.assertEqual(default_features, [
+ 'deep-flatten', 'exclusive-lock', 'fast-diff', 'layering', 'object-map'])
+
+ def test_clone_format_version(self):
+ config_name = 'rbd_default_clone_format'
+
+ def _get_config_by_name(conf_name):
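+            # Return the currently configured value, or None if the option is unset.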
+ data = self._get('/api/cluster_conf/{}'.format(conf_name))
+ if 'value' in data:
+ return data['value']
+ return None
+
+ # with rbd_default_clone_format = auto
+ clone_format_version = self._get('/api/block/image/clone_format_version')
+ self.assertEqual(clone_format_version, 1)
+ self.assertStatus(200)
+
+ # with rbd_default_clone_format = 1
+ value = [{'section': "global", 'value': "1"}]
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': value
+ })
+ self.wait_until_equal(
+ lambda: _get_config_by_name(config_name),
+ value,
+ timeout=60)
+ clone_format_version = self._get('/api/block/image/clone_format_version')
+ self.assertEqual(clone_format_version, 1)
+ self.assertStatus(200)
+
+ # with rbd_default_clone_format = 2
+ value = [{'section': "global", 'value': "2"}]
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': value
+ })
+ self.wait_until_equal(
+ lambda: _get_config_by_name(config_name),
+ value,
+ timeout=60)
+ clone_format_version = self._get('/api/block/image/clone_format_version')
+ self.assertEqual(clone_format_version, 2)
+ self.assertStatus(200)
+
+ value = []
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': value
+ })
+ self.wait_until_equal(
+ lambda: _get_config_by_name(config_name),
+ None,
+ timeout=60)
+
+ def test_image_with_namespace(self):
+ self.create_namespace('rbd', 'ns')
+ self.create_image('rbd', 'ns', 'test', 10240)
+ self.assertStatus(201)
+
+ img = self.get_image('rbd', 'ns', 'test')
+ self.assertStatus(200)
+
+ self._validate_image(img, name='test', size=10240,
+ pool_name='rbd', namespace='ns',
+ num_objs=1, obj_size=4194304,
+ features_name=['deep-flatten',
+ 'exclusive-lock',
+ 'fast-diff', 'layering',
+ 'object-map'])
+
+ self.remove_image('rbd', 'ns', 'test')
+ self.remove_namespace('rbd', 'ns')
+
+ def test_move_image_to_trash(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd')
+
+ self.get_image('rbd', None, 'test_rbd')
+ self.assertStatus(404)
+
+ time.sleep(1)
+
+ image = self.get_trash('rbd', img_id)
+ self.assertIsNotNone(image)
+
+ self.remove_trash('rbd', img_id)
+
+ def test_list_trash(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd', 0)
+ data = self._get('/api/block/image/trash/?pool_name={}'.format('rbd'))
+ self.assertStatus(200)
+ self.assertIsInstance(data, list)
+ self.assertIsNotNone(data)
+
+ self.remove_trash('rbd', img_id)
+ self.assertStatus(204)
+
+ def test_restore_trash(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd')
+
+ self.restore_trash('rbd', None, img_id, 'test_rbd')
+
+ self.get_image('rbd', None, 'test_rbd')
+ self.assertStatus(200)
+
+ image = self.get_trash('rbd', img_id)
+ self.assertIsNone(image)
+
+ self.remove_image('rbd', None, 'test_rbd')
+
+ def test_remove_expired_trash(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd', 0)
+ self.remove_trash('rbd', img_id, False)
+ self.assertStatus(204)
+
+ image = self.get_trash('rbd', img_id)
+ self.assertIsNone(image)
+
+ def test_remove_not_expired_trash(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd', 9999)
+ self.remove_trash('rbd', img_id, False)
+ self.assertStatus(400)
+
+ time.sleep(1)
+
+ image = self.get_trash('rbd', img_id)
+ self.assertIsNotNone(image)
+
+ self.remove_trash('rbd', img_id, True)
+
+ def test_remove_not_expired_trash_with_force(self):
+ img_id = self.create_image_in_trash('rbd', 'test_rbd', 9999)
+ self.remove_trash('rbd', img_id, True)
+ self.assertStatus(204)
+
+ image = self.get_trash('rbd', img_id)
+ self.assertIsNone(image)
+
+ def test_purge_trash(self):
+ id_expired = self.create_image_in_trash('rbd', 'test_rbd_expired', 0)
+ id_not_expired = self.create_image_in_trash('rbd', 'test_rbd', 9999)
+
+ time.sleep(1)
+
+ self.purge_trash('rbd')
+ self.assertStatus([200, 201])
+
+ time.sleep(1)
+
+ trash_not_expired = self.get_trash('rbd', id_not_expired)
+ self.assertIsNotNone(trash_not_expired)
+
+ self.wait_until_equal(lambda: self.get_trash('rbd', id_expired), None, 60)
+
+ def test_list_namespaces(self):
+ self.create_namespace('rbd', 'ns')
+
+ namespaces = self._get('/api/block/pool/rbd/namespace')
+ self.assertStatus(200)
+ self.assertEqual(len(namespaces), 1)
+
+ self.remove_namespace('rbd', 'ns')
diff --git a/qa/tasks/mgr/dashboard/test_rbd_mirroring.py b/qa/tasks/mgr/dashboard/test_rbd_mirroring.py
new file mode 100644
index 000000000..b6a86e405
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_rbd_mirroring.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-public-methods
+
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class RbdMirroringTest(DashboardTestCase):
+ AUTH_ROLES = ['pool-manager', 'block-manager']
+
+ @classmethod
+ def get_pool(cls, pool):
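+        # Return the mirroring summary of the pool, or an empty dict if the
+        # request did not yield a JSON object.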
+ data = cls._get('/api/block/mirroring/pool/{}'.format(pool))
+ if isinstance(data, dict):
+ return data
+ return {}
+
+ @classmethod
+ def update_pool(cls, pool, mirror_mode):
+ data = {'mirror_mode': mirror_mode}
+ return cls._task_put('/api/block/mirroring/pool/{}'.format(pool),
+ data)
+
+ @classmethod
+ def list_peers(cls, pool):
+ data = cls._get('/api/block/mirroring/pool/{}/peer'.format(pool))
+ if isinstance(data, list):
+ return data
+ return []
+
+ @classmethod
+ def get_peer(cls, pool, peer_uuid):
+ data = cls._get('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid))
+ if isinstance(data, dict):
+ return data
+ return {}
+
+ @classmethod
+ def create_peer(cls, pool, cluster_name, client_id, **kwargs):
+ data = {'cluster_name': cluster_name, 'client_id': client_id}
+ data.update(kwargs)
+ return cls._task_post('/api/block/mirroring/pool/{}/peer'.format(pool),
+ data)
+
+ @classmethod
+ def update_peer(cls, pool, peer_uuid, **kwargs):
+ return cls._task_put('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid),
+ kwargs)
+
+ @classmethod
+ def delete_peer(cls, pool, peer_uuid):
+ return cls._task_delete('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid))
+
+ @classmethod
+ def setUpClass(cls):
+ super(RbdMirroringTest, cls).setUpClass()
+ cls.create_pool('rbd', 2**3, 'replicated')
+
+ @classmethod
+ def tearDownClass(cls):
+ super(RbdMirroringTest, cls).tearDownClass()
+ cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it'])
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['create', 'update', 'delete']}])
+ def test_read_access_permissions(self):
+ self.get_pool('rbd')
+ self.assertStatus(403)
+ self.list_peers('rbd')
+ self.assertStatus(403)
+ self.get_peer('rbd', '123')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'update', 'delete']}])
+ def test_create_access_permissions(self):
+ self.create_peer('rbd', 'remote', 'id')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'create', 'delete']}])
+ def test_update_access_permissions(self):
+ self.update_peer('rbd', '123')
+ self.assertStatus(403)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'create', 'update']}])
+ def test_delete_access_permissions(self):
+ self.delete_peer('rbd', '123')
+ self.assertStatus(403)
+
+ def test_mirror_mode(self):
+ self.update_pool('rbd', 'disabled')
+ mode = self.get_pool('rbd').get('mirror_mode')
+ self.assertEqual(mode, 'disabled')
+
+ self.update_pool('rbd', 'image')
+ mode = self.get_pool('rbd').get('mirror_mode')
+ self.assertEqual(mode, 'image')
+
+ self.update_pool('rbd', 'pool')
+ mode = self.get_pool('rbd').get('mirror_mode')
+ self.assertEqual(mode, 'pool')
+
+ self.update_pool('rbd', 'disabled')
+ mode = self.get_pool('rbd').get('mirror_mode')
+ self.assertEqual(mode, 'disabled')
+
+ def test_set_invalid_mirror_mode(self):
+ self.update_pool('rbd', 'invalid')
+ self.assertStatus(400)
+
+ def test_set_same_mirror_mode(self):
+ self.update_pool('rbd', 'disabled')
+ self.update_pool('rbd', 'disabled')
+ self.assertStatus(200)
+
+ def test_peer(self):
+ self.update_pool('rbd', 'image')
+ self.assertStatus(200)
+
+ peers = self.list_peers('rbd')
+ self.assertStatus(200)
+ self.assertEqual([], peers)
+
+ uuid = self.create_peer('rbd', 'remote', 'admin')['uuid']
+ self.assertStatus(201)
+
+ peers = self.list_peers('rbd')
+ self.assertStatus(200)
+ self.assertEqual([uuid], peers)
+
+ expected_peer = {
+ 'uuid': uuid,
+ 'cluster_name': 'remote',
+ 'site_name': 'remote',
+ 'client_id': 'admin',
+ 'mon_host': '',
+ 'key': '',
+ 'direction': 'rx-tx',
+ 'mirror_uuid': ''
+ }
+ peer = self.get_peer('rbd', uuid)
+ self.assertEqual(expected_peer, peer)
+
+ self.update_peer('rbd', uuid, mon_host='1.2.3.4')
+ self.assertStatus(200)
+
+ expected_peer['mon_host'] = '1.2.3.4'
+ peer = self.get_peer('rbd', uuid)
+ self.assertEqual(expected_peer, peer)
+
+ self.delete_peer('rbd', uuid)
+ self.assertStatus(204)
+
+ self.update_pool('rbd', 'disabled')
+ self.assertStatus(200)
+
+ def test_disable_mirror_with_peers(self):
+ self.update_pool('rbd', 'image')
+ self.assertStatus(200)
+
+ uuid = self.create_peer('rbd', 'remote', 'admin')['uuid']
+ self.assertStatus(201)
+
+ self.update_pool('rbd', 'disabled')
+ self.assertStatus(400)
+
+ self.delete_peer('rbd', uuid)
+ self.assertStatus(204)
+
+ self.update_pool('rbd', 'disabled')
+ self.assertStatus(200)
+
+ def test_site_name(self):
+ expected_site_name = {'site_name': 'site-a'}
+ self._task_put('/api/block/mirroring/site_name', expected_site_name)
+ self.assertStatus(200)
+
+ site_name = self._get('/api/block/mirroring/site_name')
+ self.assertStatus(200)
+ self.assertEqual(expected_site_name, site_name)
+
+ def test_bootstrap(self):
+ self.update_pool('rbd', 'image')
+ token_data = self._task_post('/api/block/mirroring/pool/rbd/bootstrap/token', {})
+ self.assertStatus(200)
+
+ import_data = {
+ 'token': token_data['token'],
+ 'direction': 'invalid'}
+ self._task_post('/api/block/mirroring/pool/rbd/bootstrap/peer', import_data)
+ self.assertStatus(400)
+
+        # cannot import "yourself" as a peer
+ import_data['direction'] = 'rx'
+ self._task_post('/api/block/mirroring/pool/rbd/bootstrap/peer', import_data)
+ self.assertStatus(400)
diff --git a/qa/tasks/mgr/dashboard/test_requests.py b/qa/tasks/mgr/dashboard/test_requests.py
new file mode 100644
index 000000000..834ba174a
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_requests.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from . import DEFAULT_API_VERSION
+from .helper import DashboardTestCase
+
+
+class RequestsTest(DashboardTestCase):
+ def test_gzip(self):
+ self._get('/api/summary')
+ self.assertHeaders({
+ 'Content-Encoding': 'gzip',
+ 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_API_VERSION)
+ })
+
+ def test_force_no_gzip(self):
+ self._get('/api/summary', headers={'Accept-Encoding': 'identity'})
+ self.assertNotIn('Content-Encoding', self._resp.headers)
+ self.assertHeaders({
+ 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_API_VERSION)
+ })
+
+ def test_server(self):
+ self._get('/api/summary')
+ self.assertHeaders({
+ 'server': 'Ceph-Dashboard',
+ 'Content-Type': 'application/vnd.ceph.api.v{}+json'.format(DEFAULT_API_VERSION),
+ 'Content-Security-Policy': "frame-ancestors 'self';",
+ 'X-Content-Type-Options': 'nosniff',
+ 'Strict-Transport-Security': 'max-age=63072000; includeSubDomains; preload'
+ })
diff --git a/qa/tasks/mgr/dashboard/test_rgw.py b/qa/tasks/mgr/dashboard/test_rgw.py
new file mode 100644
index 000000000..01dbae59f
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_rgw.py
@@ -0,0 +1,868 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import base64
+import logging
+import time
+from urllib import parse
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.hashes import SHA1
+from cryptography.hazmat.primitives.twofactor.totp import TOTP
+
+from .helper import DashboardTestCase, JLeaf, JList, JObj
+
+logger = logging.getLogger(__name__)
+
+
+class RgwTestCase(DashboardTestCase):
+
+ maxDiff = None
+ create_test_user = False
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ super(RgwTestCase, cls).setUpClass()
+ # Create the administrator account.
+ cls._radosgw_admin_cmd([
+ 'user', 'create', '--uid', 'admin', '--display-name', 'admin',
+ '--system', '--access-key', 'admin', '--secret', 'admin'
+ ])
+ # Update the dashboard configuration.
+ cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin')
+ cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin')
+        # Create a test user if requested.
+ if cls.create_test_user:
+ cls._radosgw_admin_cmd([
+ 'user', 'create', '--uid', 'teuth-test-user', '--display-name',
+ 'teuth-test-user'
+ ])
+ cls._radosgw_admin_cmd([
+ 'caps', 'add', '--uid', 'teuth-test-user', '--caps',
+ 'metadata=write'
+ ])
+ cls._radosgw_admin_cmd([
+ 'subuser', 'create', '--uid', 'teuth-test-user', '--subuser',
+ 'teuth-test-subuser', '--access', 'full', '--key-type', 's3',
+ '--access-key', 'xyz123'
+ ])
+ cls._radosgw_admin_cmd([
+ 'subuser', 'create', '--uid', 'teuth-test-user', '--subuser',
+ 'teuth-test-subuser2', '--access', 'full', '--key-type',
+ 'swift'
+ ])
+
+ @classmethod
+ def tearDownClass(cls):
+ # Delete administrator account.
+ cls._radosgw_admin_cmd(['user', 'rm', '--uid', 'admin'])
+ if cls.create_test_user:
+ cls._radosgw_admin_cmd(['user', 'rm', '--uid=teuth-test-user', '--purge-data'])
+ super(RgwTestCase, cls).tearDownClass()
+
+ def get_rgw_user(self, uid, stats=True):
+ return self._get('/api/rgw/user/{}?stats={}'.format(uid, stats))
+
+
+class RgwApiCredentialsTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ def test_invalid_credentials(self):
+ self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'invalid')
+ self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'invalid')
+ resp = self._get('/api/rgw/user')
+ self.assertStatus(404)
+ self.assertIn('detail', resp)
+ self.assertIn('component', resp)
+ self.assertIn('Error connecting to Object Gateway', resp['detail'])
+ self.assertEqual(resp['component'], 'rgw')
+
+ def test_success(self):
+ # Set the default credentials.
+ self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin')
+ self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin')
+ data = self._get('/ui-api/rgw/status')
+ self.assertStatus(200)
+ self.assertIn('available', data)
+ self.assertIn('message', data)
+ self.assertTrue(data['available'])
+
+
+class RgwSiteTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ def test_get_placement_targets(self):
+ data = self._get('/api/rgw/site?query=placement-targets')
+ self.assertStatus(200)
+ self.assertSchema(data, JObj({
+ 'zonegroup': str,
+ 'placement_targets': JList(JObj({
+ 'name': str,
+ 'data_pool': str
+ }))
+ }))
+
+ def test_get_realms(self):
+ data = self._get('/api/rgw/site?query=realms')
+ self.assertStatus(200)
+ self.assertSchema(data, JList(str))
+
+
+class RgwBucketTest(RgwTestCase):
+
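+    # TOTP parameters shared between the radosgw-admin MFA token created in
+    # setUpClass() and the pin generator used by the tests.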
+ _mfa_token_serial = '1'
+ _mfa_token_seed = '23456723'
+ _mfa_token_time_step = 2
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ cls.create_test_user = True
+ super(RgwBucketTest, cls).setUpClass()
+ # Create MFA TOTP token for test user.
+ cls._radosgw_admin_cmd([
+ 'mfa', 'create', '--uid', 'teuth-test-user', '--totp-serial', cls._mfa_token_serial,
+ '--totp-seed', cls._mfa_token_seed, '--totp-seed-type', 'base32',
+ '--totp-seconds', str(cls._mfa_token_time_step), '--totp-window', '1'
+ ])
+ # Create tenanted users.
+ cls._radosgw_admin_cmd([
+ 'user', 'create', '--tenant', 'testx', '--uid', 'teuth-test-user',
+ '--display-name', 'tenanted teuth-test-user'
+ ])
+ cls._radosgw_admin_cmd([
+ 'user', 'create', '--tenant', 'testx2', '--uid', 'teuth-test-user2',
+ '--display-name', 'tenanted teuth-test-user 2'
+ ])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls._radosgw_admin_cmd(
+ ['user', 'rm', '--tenant', 'testx', '--uid=teuth-test-user', '--purge-data'])
+ cls._radosgw_admin_cmd(
+ ['user', 'rm', '--tenant', 'testx2', '--uid=teuth-test-user2', '--purge-data'])
+ super(RgwBucketTest, cls).tearDownClass()
+
+ def _get_mfa_token_pin(self):
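+        # Generate the current TOTP pin from the shared base32 seed; radosgw
+        # validates it against the MFA token created in setUpClass().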
+ totp_key = base64.b32decode(self._mfa_token_seed)
+ totp = TOTP(totp_key, 6, SHA1(), self._mfa_token_time_step, backend=default_backend(),
+ enforce_key_length=False)
+ time_value = int(time.time())
+ return totp.generate(time_value)
+
+ def test_all(self):
+ # Create a new bucket.
+ self._post(
+ '/api/rgw/bucket',
+ params={
+ 'bucket': 'teuth-test-bucket',
+ 'uid': 'admin',
+ 'zonegroup': 'default',
+ 'placement_target': 'default-placement'
+ })
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertSchema(data, JObj(sub_elems={
+ 'bucket_info': JObj(sub_elems={
+ 'bucket': JObj(allow_unknown=True, sub_elems={
+ 'name': JLeaf(str),
+ 'bucket_id': JLeaf(str),
+ 'tenant': JLeaf(str)
+ }),
+ 'quota': JObj(sub_elems={}, allow_unknown=True),
+ 'creation_time': JLeaf(str)
+ }, allow_unknown=True)
+ }, allow_unknown=True))
+ data = data['bucket_info']['bucket']
+ self.assertEqual(data['name'], 'teuth-test-bucket')
+ self.assertEqual(data['tenant'], '')
+
+ # List all buckets.
+ data = self._get('/api/rgw/bucket', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 1)
+ self.assertIn('teuth-test-bucket', data)
+
+ # List all buckets with stats.
+ data = self._get('/api/rgw/bucket?stats=true', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 1)
+ self.assertSchema(data[0], JObj(sub_elems={
+ 'bid': JLeaf(str),
+ 'bucket': JLeaf(str),
+ 'bucket_quota': JObj(sub_elems={}, allow_unknown=True),
+ 'id': JLeaf(str),
+ 'owner': JLeaf(str),
+ 'usage': JObj(sub_elems={}, allow_unknown=True),
+ 'tenant': JLeaf(str),
+ }, allow_unknown=True))
+
+        # List all bucket names without stats.
+ data = self._get('/api/rgw/bucket?stats=false', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(data, ['teuth-test-bucket'])
+
+ # Get the bucket.
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertSchema(data, JObj(sub_elems={
+ 'id': JLeaf(str),
+ 'bid': JLeaf(str),
+ 'tenant': JLeaf(str),
+ 'bucket': JLeaf(str),
+ 'bucket_quota': JObj(sub_elems={}, allow_unknown=True),
+ 'owner': JLeaf(str),
+ 'mfa_delete': JLeaf(str),
+ 'usage': JObj(sub_elems={}, allow_unknown=True),
+ 'versioning': JLeaf(str)
+ }, allow_unknown=True))
+ self.assertEqual(data['bucket'], 'teuth-test-bucket')
+ self.assertEqual(data['owner'], 'admin')
+ self.assertEqual(data['placement_rule'], 'default-placement')
+ self.assertEqual(data['versioning'], 'Suspended')
+
+ # Update bucket: change owner, enable versioning.
+ self._put(
+ '/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'teuth-test-user',
+ 'versioning_state': 'Enabled'
+ })
+ self.assertStatus(200)
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertSchema(data, JObj(sub_elems={
+ 'owner': JLeaf(str),
+ 'bid': JLeaf(str),
+ 'tenant': JLeaf(str)
+ }, allow_unknown=True))
+ self.assertEqual(data['owner'], 'teuth-test-user')
+ self.assertEqual(data['versioning'], 'Enabled')
+
+ # Update bucket: enable MFA Delete.
+ self._put(
+ '/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'teuth-test-user',
+ 'versioning_state': 'Enabled',
+ 'mfa_delete': 'Enabled',
+ 'mfa_token_serial': self._mfa_token_serial,
+ 'mfa_token_pin': self._get_mfa_token_pin()
+ })
+ self.assertStatus(200)
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertEqual(data['versioning'], 'Enabled')
+ self.assertEqual(data['mfa_delete'], 'Enabled')
+
+ # Update bucket: disable versioning & MFA Delete.
+ time.sleep(self._mfa_token_time_step * 3) # Required to get new TOTP pin.
+ self._put(
+ '/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'teuth-test-user',
+ 'versioning_state': 'Suspended',
+ 'mfa_delete': 'Disabled',
+ 'mfa_token_serial': self._mfa_token_serial,
+ 'mfa_token_pin': self._get_mfa_token_pin()
+ })
+ self.assertStatus(200)
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertEqual(data['versioning'], 'Suspended')
+ self.assertEqual(data['mfa_delete'], 'Disabled')
+
+ # Delete the bucket.
+ self._delete('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(204)
+ data = self._get('/api/rgw/bucket', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 0)
+
+ def test_crud_w_tenant(self):
+ # Create a new bucket. The tenant of the user is used when
+ # the bucket is created.
+ self._post(
+ '/api/rgw/bucket',
+ params={
+ 'bucket': 'teuth-test-bucket',
+ 'uid': 'testx$teuth-test-user',
+ 'zonegroup': 'default',
+ 'placement_target': 'default-placement'
+ })
+ self.assertStatus(201)
+        # The result cannot be validated because the RGW Admin OPS API does not
+        # return a result object when a tenanted bucket is created.
+ data = self.jsonBody()
+ self.assertIsNone(data)
+
+ # List all buckets.
+ data = self._get('/api/rgw/bucket', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 1)
+ self.assertIn('testx/teuth-test-bucket', data)
+
+ def _verify_tenant_bucket(bucket, tenant, uid):
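+            # Fetch '<tenant>/<bucket>' and verify that owner, tenant and bid
+            # reflect the given tenant and uid.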
+ full_bucket_name = '{}/{}'.format(tenant, bucket)
+ _data = self._get('/api/rgw/bucket/{}'.format(
+ parse.quote_plus(full_bucket_name)))
+ self.assertStatus(200)
+ self.assertSchema(_data, JObj(sub_elems={
+ 'owner': JLeaf(str),
+ 'bucket': JLeaf(str),
+ 'tenant': JLeaf(str),
+ 'bid': JLeaf(str)
+ }, allow_unknown=True))
+ self.assertEqual(_data['owner'], '{}${}'.format(tenant, uid))
+ self.assertEqual(_data['bucket'], bucket)
+ self.assertEqual(_data['tenant'], tenant)
+ self.assertEqual(_data['bid'], full_bucket_name)
+ return _data
+
+ # Get the bucket.
+ data = _verify_tenant_bucket('teuth-test-bucket', 'testx', 'teuth-test-user')
+ self.assertEqual(data['placement_rule'], 'default-placement')
+ self.assertEqual(data['versioning'], 'Suspended')
+
+ # Update bucket: different user with different tenant, enable versioning.
+ self._put(
+ '/api/rgw/bucket/{}'.format(
+ parse.quote_plus('testx/teuth-test-bucket')),
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'testx2$teuth-test-user2',
+ 'versioning_state': 'Enabled'
+ })
+ data = _verify_tenant_bucket('teuth-test-bucket', 'testx2', 'teuth-test-user2')
+ self.assertEqual(data['versioning'], 'Enabled')
+
+ # Change owner to a non-tenanted user
+ self._put(
+ '/api/rgw/bucket/{}'.format(
+ parse.quote_plus('testx2/teuth-test-bucket')),
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'admin'
+ })
+ self.assertStatus(200)
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertIn('owner', data)
+ self.assertEqual(data['owner'], 'admin')
+ self.assertEqual(data['tenant'], '')
+ self.assertEqual(data['bucket'], 'teuth-test-bucket')
+ self.assertEqual(data['bid'], 'teuth-test-bucket')
+ self.assertEqual(data['versioning'], 'Enabled')
+
+ # Change owner back to tenanted user, suspend versioning.
+ self._put(
+ '/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'testx$teuth-test-user',
+ 'versioning_state': 'Suspended'
+ })
+ self.assertStatus(200)
+ data = _verify_tenant_bucket('teuth-test-bucket', 'testx', 'teuth-test-user')
+ self.assertEqual(data['versioning'], 'Suspended')
+
+ # Delete the bucket.
+ self._delete('/api/rgw/bucket/{}'.format(
+ parse.quote_plus('testx/teuth-test-bucket')))
+ self.assertStatus(204)
+ data = self._get('/api/rgw/bucket', version='1.1')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 0)
+
+ def test_crud_w_locking(self):
+ # Create
+ self._post('/api/rgw/bucket',
+ params={
+ 'bucket': 'teuth-test-bucket',
+ 'uid': 'teuth-test-user',
+ 'zonegroup': 'default',
+ 'placement_target': 'default-placement',
+ 'lock_enabled': 'true',
+ 'lock_mode': 'GOVERNANCE',
+ 'lock_retention_period_days': '0',
+ 'lock_retention_period_years': '1'
+ })
+ self.assertStatus(201)
+ # Read
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(200)
+ self.assertSchema(
+ data,
+ JObj(sub_elems={
+ 'lock_enabled': JLeaf(bool),
+ 'lock_mode': JLeaf(str),
+ 'lock_retention_period_days': JLeaf(int),
+ 'lock_retention_period_years': JLeaf(int)
+ },
+ allow_unknown=True))
+ self.assertTrue(data['lock_enabled'])
+ self.assertEqual(data['lock_mode'], 'GOVERNANCE')
+ self.assertEqual(data['lock_retention_period_days'], 0)
+ self.assertEqual(data['lock_retention_period_years'], 1)
+ # Update
+ self._put('/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'teuth-test-user',
+ 'lock_mode': 'COMPLIANCE',
+ 'lock_retention_period_days': '15',
+ 'lock_retention_period_years': '0'
+ })
+ self.assertStatus(200)
+ data = self._get('/api/rgw/bucket/teuth-test-bucket')
+ self.assertTrue(data['lock_enabled'])
+ self.assertEqual(data['lock_mode'], 'COMPLIANCE')
+ self.assertEqual(data['lock_retention_period_days'], 15)
+ self.assertEqual(data['lock_retention_period_years'], 0)
+ self.assertStatus(200)
+
+ # Update: Disabling bucket versioning should fail if object locking enabled
+ self._put('/api/rgw/bucket/teuth-test-bucket',
+ params={
+ 'bucket_id': data['id'],
+ 'uid': 'teuth-test-user',
+ 'versioning_state': 'Suspended'
+ })
+ self.assertStatus(409)
+
+ # Delete
+ self._delete('/api/rgw/bucket/teuth-test-bucket')
+ self.assertStatus(204)
+
+
+class RgwDaemonTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @DashboardTestCase.RunAs('test', 'test', [{
+ 'rgw': ['create', 'update', 'delete']
+ }])
+ def test_read_access_permissions(self):
+ self._get('/api/rgw/daemon')
+ self.assertStatus(403)
+ self._get('/api/rgw/daemon/id')
+ self.assertStatus(403)
+
+ def test_list(self):
+ data = self._get('/api/rgw/daemon')
+ self.assertStatus(200)
+ self.assertEqual(len(data), 1)
+ data = data[0]
+ self.assertIn('id', data)
+ self.assertIn('version', data)
+ self.assertIn('server_hostname', data)
+ self.assertIn('zonegroup_name', data)
+ self.assertIn('zone_name', data)
+ self.assertIn('port', data)
+
+ def test_get(self):
+ data = self._get('/api/rgw/daemon')
+ self.assertStatus(200)
+
+ data = self._get('/api/rgw/daemon/{}'.format(data[0]['id']))
+ self.assertStatus(200)
+ self.assertIn('rgw_metadata', data)
+ self.assertIn('rgw_id', data)
+ self.assertIn('rgw_status', data)
+ self.assertTrue(data['rgw_metadata'])
+
+ def test_status(self):
+ data = self._get('/ui-api/rgw/status')
+ self.assertStatus(200)
+ self.assertIn('available', data)
+ self.assertIn('message', data)
+ self.assertTrue(data['available'])
+
+
+class RgwUserTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ super(RgwUserTest, cls).setUpClass()
+
+ def _assert_user_data(self, data):
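+ # Validate the common RGW user schema and check that at least one access key is present.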
+ self.assertSchema(data, JObj(sub_elems={
+ 'caps': JList(JObj(sub_elems={}, allow_unknown=True)),
+ 'display_name': JLeaf(str),
+ 'email': JLeaf(str),
+ 'keys': JList(JObj(sub_elems={}, allow_unknown=True)),
+ 'max_buckets': JLeaf(int),
+ 'subusers': JList(JLeaf(str)),
+ 'suspended': JLeaf(int),
+ 'swift_keys': JList(JObj(sub_elems={}, allow_unknown=True)),
+ 'tenant': JLeaf(str),
+ 'user_id': JLeaf(str),
+ 'uid': JLeaf(str)
+ }, allow_unknown=True))
+ self.assertGreaterEqual(len(data['keys']), 1)
+
+ def test_get(self):
+ data = self.get_rgw_user('admin')
+ self.assertStatus(200)
+ self._assert_user_data(data)
+ self.assertEqual(data['user_id'], 'admin')
+ self.assertTrue(data['stats'])
+ self.assertIsInstance(data['stats'], dict)
+ # Test without stats.
+ data = self.get_rgw_user('admin', False)
+ self.assertStatus(200)
+ self._assert_user_data(data)
+ self.assertEqual(data['user_id'], 'admin')
+
+ def test_list(self):
+ data = self._get('/api/rgw/user')
+ self.assertStatus(200)
+ self.assertGreaterEqual(len(data), 1)
+ self.assertIn('admin', data)
+
+ def test_get_emails(self):
+ data = self._get('/api/rgw/user/get_emails')
+ self.assertStatus(200)
+ self.assertSchema(data, JList(str))
+
+ def test_create_get_update_delete(self):
+ # Create a new user.
+ self._post('/api/rgw/user', params={
+ 'uid': 'teuth-test-user',
+ 'display_name': 'display name'
+ })
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._assert_user_data(data)
+ self.assertEqual(data['user_id'], 'teuth-test-user')
+ self.assertEqual(data['display_name'], 'display name')
+
+ # Get the user.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ self._assert_user_data(data)
+ self.assertEqual(data['tenant'], '')
+ self.assertEqual(data['user_id'], 'teuth-test-user')
+ self.assertEqual(data['uid'], 'teuth-test-user')
+
+ # Update the user.
+ self._put(
+ '/api/rgw/user/teuth-test-user',
+ params={'display_name': 'new name'})
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self._assert_user_data(data)
+ self.assertEqual(data['display_name'], 'new name')
+
+ # Delete the user.
+ self._delete('/api/rgw/user/teuth-test-user')
+ self.assertStatus(204)
+ self.get_rgw_user('teuth-test-user')
+ self.assertStatus(500)
+ resp = self.jsonBody()
+ self.assertIn('detail', resp)
+ self.assertIn('failed request with status code 404', resp['detail'])
+ self.assertIn('"Code":"NoSuchUser"', resp['detail'])
+ self.assertIn('"HostId"', resp['detail'])
+ self.assertIn('"RequestId"', resp['detail'])
+
+ def test_create_get_update_delete_w_tenant(self):
+ # Create a new user.
+ self._post(
+ '/api/rgw/user',
+ params={
+ 'uid': 'test01$teuth-test-user',
+ 'display_name': 'display name'
+ })
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self._assert_user_data(data)
+ self.assertEqual(data['user_id'], 'teuth-test-user')
+ self.assertEqual(data['display_name'], 'display name')
+
+ # Get the user.
+ data = self.get_rgw_user('test01$teuth-test-user')
+ self.assertStatus(200)
+ self._assert_user_data(data)
+ self.assertEqual(data['tenant'], 'test01')
+ self.assertEqual(data['user_id'], 'teuth-test-user')
+ self.assertEqual(data['uid'], 'test01$teuth-test-user')
+
+ # Update the user.
+ self._put(
+ '/api/rgw/user/test01$teuth-test-user',
+ params={'display_name': 'new name'})
+ self.assertStatus(200)
+ data = self.jsonBody()
+ self._assert_user_data(data)
+ self.assertEqual(data['display_name'], 'new name')
+
+ # Delete the user.
+ self._delete('/api/rgw/user/test01$teuth-test-user')
+ self.assertStatus(204)
+ self.get_rgw_user('test01$teuth-test-user')
+ self.assertStatus(500)
+ resp = self.jsonBody()
+ self.assertIn('detail', resp)
+ self.assertIn('failed request with status code 404', resp['detail'])
+ self.assertIn('"Code":"NoSuchUser"', resp['detail'])
+ self.assertIn('"HostId"', resp['detail'])
+ self.assertIn('"RequestId"', resp['detail'])
+
+
+class RgwUserCapabilityTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ cls.create_test_user = True
+ super(RgwUserCapabilityTest, cls).setUpClass()
+
+ def test_set(self):
+ self._post(
+ '/api/rgw/user/teuth-test-user/capability',
+ params={
+ 'type': 'usage',
+ 'perm': 'read'
+ })
+ self.assertStatus(201)
+ data = self.jsonBody()
+ self.assertEqual(len(data), 1)
+ data = data[0]
+ self.assertEqual(data['type'], 'usage')
+ self.assertEqual(data['perm'], 'read')
+
+ # Get the user data to validate the capabilities.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ self.assertGreaterEqual(len(data['caps']), 1)
+ self.assertEqual(data['caps'][0]['type'], 'usage')
+ self.assertEqual(data['caps'][0]['perm'], 'read')
+
+ def test_delete(self):
+ self._delete(
+ '/api/rgw/user/teuth-test-user/capability',
+ params={
+ 'type': 'metadata',
+ 'perm': 'write'
+ })
+ self.assertStatus(204)
+
+ # Get the user data to validate the capabilities.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ self.assertEqual(len(data['caps']), 0)
+
+
+class RgwUserKeyTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ cls.create_test_user = True
+ super(RgwUserKeyTest, cls).setUpClass()
+
+ def test_create_s3(self):
+ self._post(
+ '/api/rgw/user/teuth-test-user/key',
+ params={
+ 'key_type': 's3',
+ 'generate_key': 'false',
+ 'access_key': 'abc987',
+ 'secret_key': 'aaabbbccc'
+ })
+ data = self.jsonBody()
+ self.assertStatus(201)
+ self.assertGreaterEqual(len(data), 3)
+ key = self.find_object_in_list('access_key', 'abc987', data)
+ self.assertIsInstance(key, object)
+ self.assertEqual(key['secret_key'], 'aaabbbccc')
+
+ def test_create_swift(self):
+ self._post(
+ '/api/rgw/user/teuth-test-user/key',
+ params={
+ 'key_type': 'swift',
+ 'subuser': 'teuth-test-subuser',
+ 'generate_key': 'false',
+ 'secret_key': 'xxxyyyzzz'
+ })
+ data = self.jsonBody()
+ self.assertStatus(201)
+ self.assertGreaterEqual(len(data), 2)
+ key = self.find_object_in_list('secret_key', 'xxxyyyzzz', data)
+ self.assertIsInstance(key, object)
+
+ def test_delete_s3(self):
+ self._delete(
+ '/api/rgw/user/teuth-test-user/key',
+ params={
+ 'key_type': 's3',
+ 'access_key': 'xyz123'
+ })
+ self.assertStatus(204)
+
+ def test_delete_swift(self):
+ self._delete(
+ '/api/rgw/user/teuth-test-user/key',
+ params={
+ 'key_type': 'swift',
+ 'subuser': 'teuth-test-user:teuth-test-subuser2'
+ })
+ self.assertStatus(204)
+
+
+class RgwUserQuotaTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ cls.create_test_user = True
+ super(RgwUserQuotaTest, cls).setUpClass()
+
+ def _assert_quota(self, data):
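+ # Check that both the user and bucket quota sections contain the expected fields.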
+ self.assertIn('user_quota', data)
+ self.assertIn('max_objects', data['user_quota'])
+ self.assertIn('enabled', data['user_quota'])
+ self.assertIn('max_size_kb', data['user_quota'])
+ self.assertIn('max_size', data['user_quota'])
+ self.assertIn('bucket_quota', data)
+ self.assertIn('max_objects', data['bucket_quota'])
+ self.assertIn('enabled', data['bucket_quota'])
+ self.assertIn('max_size_kb', data['bucket_quota'])
+ self.assertIn('max_size', data['bucket_quota'])
+
+ def test_get_quota(self):
+ data = self._get('/api/rgw/user/teuth-test-user/quota')
+ self.assertStatus(200)
+ self._assert_quota(data)
+
+ def test_set_user_quota(self):
+ self._put(
+ '/api/rgw/user/teuth-test-user/quota',
+ params={
+ 'quota_type': 'user',
+ 'enabled': 'true',
+ 'max_size_kb': 2048,
+ 'max_objects': 101
+ })
+ self.assertStatus(200)
+
+ data = self._get('/api/rgw/user/teuth-test-user/quota')
+ self.assertStatus(200)
+ self._assert_quota(data)
+ self.assertEqual(data['user_quota']['max_objects'], 101)
+ self.assertTrue(data['user_quota']['enabled'])
+ self.assertEqual(data['user_quota']['max_size_kb'], 2048)
+
+ def test_set_bucket_quota(self):
+ self._put(
+ '/api/rgw/user/teuth-test-user/quota',
+ params={
+ 'quota_type': 'bucket',
+ 'enabled': 'false',
+ 'max_size_kb': 4096,
+ 'max_objects': 2000
+ })
+ self.assertStatus(200)
+
+ data = self._get('/api/rgw/user/teuth-test-user/quota')
+ self.assertStatus(200)
+ self._assert_quota(data)
+ self.assertEqual(data['bucket_quota']['max_objects'], 2000)
+ self.assertFalse(data['bucket_quota']['enabled'])
+ self.assertEqual(data['bucket_quota']['max_size_kb'], 4096)
+
+
+class RgwUserSubuserTest(RgwTestCase):
+
+ AUTH_ROLES = ['rgw-manager']
+
+ @classmethod
+ def setUpClass(cls):
+ cls.create_test_user = True
+ super(RgwUserSubuserTest, cls).setUpClass()
+
+ def test_create_swift(self):
+ self._post(
+ '/api/rgw/user/teuth-test-user/subuser',
+ params={
+ 'subuser': 'tux',
+ 'access': 'readwrite',
+ 'key_type': 'swift'
+ })
+ self.assertStatus(200)
+ data = self.jsonBody()
+ subuser = self.find_object_in_list('id', 'teuth-test-user:tux', data)
+ self.assertIsInstance(subuser, object)
+ self.assertEqual(subuser['permissions'], 'read-write')
+
+ # Get the user data to validate the keys.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ key = self.find_object_in_list('user', 'teuth-test-user:tux',
+ data['swift_keys'])
+ self.assertIsInstance(key, object)
+
+ def test_create_s3(self):
+ self._post(
+ '/api/rgw/user/teuth-test-user/subuser',
+ params={
+ 'subuser': 'hugo',
+ 'access': 'write',
+ 'generate_secret': 'false',
+ 'access_key': 'yyy',
+ 'secret_key': 'xxx'
+ })
+ self.assertStatus(200)
+ data = self.jsonBody()
+ subuser = self.find_object_in_list('id', 'teuth-test-user:hugo', data)
+ self.assertIsInstance(subuser, object)
+ self.assertEqual(subuser['permissions'], 'write')
+
+ # Get the user data to validate the keys.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ key = self.find_object_in_list('user', 'teuth-test-user:hugo',
+ data['keys'])
+ self.assertIsInstance(key, object)
+ self.assertEqual(key['secret_key'], 'xxx')
+
+ def test_delete_w_purge(self):
+ self._delete(
+ '/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser2')
+ self.assertStatus(204)
+
+ # Get the user data to check that the keys don't exist anymore.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ key = self.find_object_in_list(
+ 'user', 'teuth-test-user:teuth-test-subuser2', data['swift_keys'])
+ self.assertIsNone(key)
+
+ def test_delete_wo_purge(self):
+ self._delete(
+ '/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser',
+ params={'purge_keys': 'false'})
+ self.assertStatus(204)
+
+ # Get the user data to check whether the keys still exist.
+ data = self.get_rgw_user('teuth-test-user')
+ self.assertStatus(200)
+ key = self.find_object_in_list(
+ 'user', 'teuth-test-user:teuth-test-subuser', data['keys'])
+ self.assertIsInstance(key, object)
diff --git a/qa/tasks/mgr/dashboard/test_role.py b/qa/tasks/mgr/dashboard/test_role.py
new file mode 100644
index 000000000..dbfaea9e4
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_role.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class RoleTest(DashboardTestCase):
+ @classmethod
+ def _create_role(cls, name=None, description=None, scopes_permissions=None):
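+ # POST a role-creation request, including only the arguments that were provided.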
+ data = {}
+ if name:
+ data['name'] = name
+ if description:
+ data['description'] = description
+ if scopes_permissions:
+ data['scopes_permissions'] = scopes_permissions
+ cls._post('/api/role', data)
+
+ def test_crud_role(self):
+ self._create_role(name='role1',
+ description='Description 1',
+ scopes_permissions={'osd': ['read']})
+ self.assertStatus(201)
+ self.assertJsonBody({
+ 'name': 'role1',
+ 'description': 'Description 1',
+ 'scopes_permissions': {'osd': ['read']},
+ 'system': False
+ })
+
+ self._get('/api/role/role1')
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'name': 'role1',
+ 'description': 'Description 1',
+ 'scopes_permissions': {'osd': ['read']},
+ 'system': False
+ })
+
+ self._put('/api/role/role1', {
+ 'description': 'Description 2',
+ 'scopes_permissions': {'osd': ['read', 'update']},
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'name': 'role1',
+ 'description': 'Description 2',
+ 'scopes_permissions': {'osd': ['read', 'update']},
+ 'system': False
+ })
+
+ self._delete('/api/role/role1')
+ self.assertStatus(204)
+
+ def test_list_roles(self):
+ roles = self._get('/api/role')
+ self.assertStatus(200)
+
+ self.assertGreaterEqual(len(roles), 1)
+ for role in roles:
+ self.assertIn('name', role)
+ self.assertIn('description', role)
+ self.assertIn('scopes_permissions', role)
+ self.assertIn('system', role)
+
+ def test_get_role_does_not_exist(self):
+ self._get('/api/role/role2')
+ self.assertStatus(404)
+
+ def test_create_role_already_exists(self):
+ self._create_role(name='read-only',
+ description='Description 1',
+ scopes_permissions={'osd': ['read']})
+ self.assertStatus(400)
+ self.assertError(code='role_already_exists',
+ component='role')
+
+ def test_create_role_no_name(self):
+ self._create_role(description='Description 1',
+ scopes_permissions={'osd': ['read']})
+ self.assertStatus(400)
+ self.assertError(code='name_required',
+ component='role')
+
+ def test_create_role_invalid_scope(self):
+ self._create_role(name='role1',
+ description='Description 1',
+ scopes_permissions={'invalid-scope': ['read']})
+ self.assertStatus(400)
+ self.assertError(code='invalid_scope',
+ component='role')
+
+ def test_create_role_invalid_permission(self):
+ self._create_role(name='role1',
+ description='Description 1',
+ scopes_permissions={'osd': ['invalid-permission']})
+ self.assertStatus(400)
+ self.assertError(code='invalid_permission',
+ component='role')
+
+ def test_delete_role_does_not_exist(self):
+ self._delete('/api/role/role2')
+ self.assertStatus(404)
+
+ def test_delete_system_role(self):
+ self._delete('/api/role/read-only')
+ self.assertStatus(400)
+ self.assertError(code='cannot_delete_system_role',
+ component='role')
+
+ def test_delete_role_associated_with_user(self):
+ self.create_user("user", "user", ['read-only'])
+ self._create_role(name='role1',
+ description='Description 1',
+ scopes_permissions={'user': ['create', 'read', 'update', 'delete']})
+ self.assertStatus(201)
+ self._put('/api/user/user', {'roles': ['role1']})
+ self.assertStatus(200)
+
+ self._delete('/api/role/role1')
+ self.assertStatus(400)
+ self.assertError(code='role_is_associated_with_user',
+ component='role')
+
+ self._put('/api/user/user', {'roles': ['administrator']})
+ self.assertStatus(200)
+ self._delete('/api/role/role1')
+ self.assertStatus(204)
+ self.delete_user("user")
+
+ def test_update_role_does_not_exist(self):
+ self._put('/api/role/role2', {})
+ self.assertStatus(404)
+
+ def test_update_system_role(self):
+ self._put('/api/role/read-only', {})
+ self.assertStatus(400)
+ self.assertError(code='cannot_update_system_role',
+ component='role')
+
+ def test_clone_role(self):
+ self._post('/api/role/read-only/clone', {'new_name': 'foo'})
+ self.assertStatus(201)
+ self._delete('/api/role/foo')
diff --git a/qa/tasks/mgr/dashboard/test_settings.py b/qa/tasks/mgr/dashboard/test_settings.py
new file mode 100644
index 000000000..d6ad1e762
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_settings.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase, JAny, JList, JObj
+
+
+class SettingsTest(DashboardTestCase):
+ def setUp(self):
+ super(SettingsTest, self).setUp()
+ self.settings = self._get('/api/settings')
+
+ def tearDown(self):
+ self._put(
+ '/api/settings',
+ {setting['name']: setting['value']
+ for setting in self.settings})
+
+ def test_list_settings(self):
+ settings = self._get('/api/settings')
+ self.assertGreater(len(settings), 10)
+ self.assertSchema(
+ settings,
+ JList(
+ JObj({
+ 'default': JAny(none=False),
+ 'name': str,
+ 'type': str,
+ 'value': JAny(none=False)
+ })))
+ self.assertStatus(200)
+
+ def test_get_setting(self):
+ setting = self._get('/api/settings/rgw-api-access-key')
+ self.assertSchema(
+ setting,
+ JObj({
+ 'default': JAny(none=False),
+ 'name': str,
+ 'type': str,
+ 'value': JAny(none=False)
+ }))
+ self.assertStatus(200)
+
+ def test_set_setting(self):
+ self._put('/api/settings/rgw-api-access-key', {'value': 'foo'})
+ self.assertStatus(200)
+
+ value = self._get('/api/settings/rgw-api-access-key')['value']
+ self.assertEqual('foo', value)
+
+ def test_bulk_set(self):
+ self._put('/api/settings', {
+ 'RGW_API_ACCESS_KEY': 'dummy-key',
+ 'RGW_API_SECRET_KEY': 'dummy-secret',
+ })
+ self.assertStatus(200)
+
+ access_key = self._get('/api/settings/rgw-api-access-key')['value']
+ self.assertStatus(200)
+ self.assertEqual('dummy-key', access_key)
+
+ secret_key = self._get('/api/settings/rgw-api-secret-key')['value']
+ self.assertStatus(200)
+ self.assertEqual('dummy-secret', secret_key)
diff --git a/qa/tasks/mgr/dashboard/test_summary.py b/qa/tasks/mgr/dashboard/test_summary.py
new file mode 100644
index 000000000..a31f89146
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_summary.py
@@ -0,0 +1,39 @@
+from __future__ import absolute_import
+
+from .helper import DashboardTestCase
+
+
+class SummaryTest(DashboardTestCase):
+ CEPHFS = True
+
+ def test_summary(self):
+ data = self._get("/api/summary")
+ self.assertStatus(200)
+
+ self.assertIn('health_status', data)
+ self.assertIn('mgr_id', data)
+ self.assertIn('have_mon_connection', data)
+ self.assertIn('rbd_mirroring', data)
+ self.assertIn('executing_tasks', data)
+ self.assertIn('finished_tasks', data)
+ self.assertIn('version', data)
+ self.assertIsNotNone(data['health_status'])
+ self.assertIsNotNone(data['mgr_id'])
+ self.assertIsNotNone(data['have_mon_connection'])
+ self.assertEqual(data['rbd_mirroring'], {'errors': 0, 'warnings': 0})
+
+ @DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
+ def test_summary_permissions(self):
+ data = self._get("/api/summary")
+ self.assertStatus(200)
+
+ self.assertIn('health_status', data)
+ self.assertIn('mgr_id', data)
+ self.assertIn('have_mon_connection', data)
+ self.assertNotIn('rbd_mirroring', data)
+ self.assertIn('executing_tasks', data)
+ self.assertIn('finished_tasks', data)
+ self.assertIn('version', data)
+ self.assertIsNotNone(data['health_status'])
+ self.assertIsNotNone(data['mgr_id'])
+ self.assertIsNotNone(data['have_mon_connection'])
diff --git a/qa/tasks/mgr/dashboard/test_telemetry.py b/qa/tasks/mgr/dashboard/test_telemetry.py
new file mode 100644
index 000000000..65c62c748
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_telemetry.py
@@ -0,0 +1,98 @@
+from .helper import DashboardTestCase, JObj
+
+
+class TelemetryTest(DashboardTestCase):
+
+ pre_enabled_status = True
+
+ @classmethod
+ def setUpClass(cls):
+ super(TelemetryTest, cls).setUpClass()
+ data = cls._get('/api/mgr/module/telemetry')
+ cls.pre_enabled_status = data['enabled']
+
+ # identify ourselves so we can filter these reports out on the server side
+ cls._put(
+ '/api/settings',
+ {
+ 'mgr/telemetry/channel_ident': True,
+ 'mgr/telemetry/organization': 'ceph-qa',
+ }
+ )
+
+ @classmethod
+ def tearDownClass(cls):
+ if cls.pre_enabled_status:
+ cls._enable_module()
+ else:
+ cls._disable_module()
+ super(TelemetryTest, cls).tearDownClass()
+
+ def test_disable_module(self):
+ self._enable_module()
+ self._check_telemetry_enabled(True)
+ self._disable_module()
+ self._check_telemetry_enabled(False)
+
+ def test_enable_module_correct_license(self):
+ self._disable_module()
+ self._check_telemetry_enabled(False)
+
+ self._put('/api/telemetry', {
+ 'enable': True,
+ 'license_name': 'sharing-1-0'
+ })
+ self.assertStatus(200)
+ self._check_telemetry_enabled(True)
+
+ def test_enable_module_empty_license(self):
+ self._disable_module()
+ self._check_telemetry_enabled(False)
+
+ self._put('/api/telemetry', {
+ 'enable': True,
+ 'license_name': ''
+ })
+ self.assertStatus(400)
+ self.assertError(code='telemetry_enable_license_missing')
+ self._check_telemetry_enabled(False)
+
+ def test_enable_module_invalid_license(self):
+ self._disable_module()
+ self._check_telemetry_enabled(False)
+
+ self._put('/api/telemetry', {
+ 'enable': True,
+ 'license_name': 'invalid-license'
+ })
+ self.assertStatus(400)
+ self.assertError(code='telemetry_enable_license_missing')
+ self._check_telemetry_enabled(False)
+
+ def test_get_report(self):
+ self._enable_module()
+ data = self._get('/api/telemetry/report')
+ self.assertStatus(200)
+ schema = JObj({
+ 'report': JObj({}, allow_unknown=True),
+ 'device_report': JObj({}, allow_unknown=True)
+ })
+ self.assertSchema(data, schema)
+
+ @classmethod
+ def _enable_module(cls):
+ cls._put('/api/telemetry', {
+ 'enable': True,
+ 'license_name': 'sharing-1-0'
+ })
+
+ @classmethod
+ def _disable_module(cls):
+ cls._put('/api/telemetry', {
+ 'enable': False
+ })
+
+ def _check_telemetry_enabled(self, enabled):
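+ # Assert that the telemetry module reports the expected enabled state.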
+ data = self._get('/api/mgr/module/telemetry')
+ self.assertStatus(200)
+ self.assertEqual(data['enabled'], enabled)
diff --git a/qa/tasks/mgr/dashboard/test_user.py b/qa/tasks/mgr/dashboard/test_user.py
new file mode 100644
index 000000000..3a6464f5a
--- /dev/null
+++ b/qa/tasks/mgr/dashboard/test_user.py
@@ -0,0 +1,565 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-public-methods
+
+from __future__ import absolute_import
+
+import time
+from datetime import datetime, timedelta
+
+from .helper import DashboardTestCase
+
+
+class UserTest(DashboardTestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(UserTest, cls).setUpClass()
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-length-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-oldpwd-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-username-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-exclusion-list-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-complexity-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-sequential-chars-enabled', 'true'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-repetitive-chars-enabled', 'true'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-username-enabled', 'false'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-exclusion-list-enabled', 'false'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-complexity-enabled', 'false'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-sequential-chars-enabled', 'false'])
+ cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-repetitive-chars-enabled', 'false'])
+ super(UserTest, cls).tearDownClass()
+
+ @classmethod
+ def _create_user(cls, username=None, password=None, name=None, email=None, roles=None,
+ enabled=True, pwd_expiration_date=None, pwd_update_required=False):
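+ # POST a user-creation request; optional fields are included only when provided.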
+ data = {}
+ if username:
+ data['username'] = username
+ if password:
+ data['password'] = password
+ if name:
+ data['name'] = name
+ if email:
+ data['email'] = email
+ if roles:
+ data['roles'] = roles
+ if pwd_expiration_date:
+ data['pwdExpirationDate'] = pwd_expiration_date
+ data['pwdUpdateRequired'] = pwd_update_required
+ data['enabled'] = enabled
+ cls._post("/api/user", data)
+
+ @classmethod
+ def _reset_login_to_admin(cls, username=None):
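+ # Log out the current session, optionally remove the given test user, and log back in as admin.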
+ cls.logout()
+ if username:
+ cls.delete_user(username)
+ cls.login('admin', 'admin')
+
+ def test_crud_user(self):
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'])
+ self.assertStatus(201)
+ user = self.jsonBody()
+
+ self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'username': 'user1',
+ 'name': 'My Name',
+ 'email': 'my@email.com',
+ 'roles': ['administrator'],
+ 'lastUpdate': user['lastUpdate'],
+ 'enabled': True,
+ 'pwdExpirationDate': None,
+ 'pwdUpdateRequired': False
+ })
+
+ self._put('/api/user/user1', {
+ 'name': 'My New Name',
+ 'email': 'mynew@email.com',
+ 'roles': ['block-manager'],
+ })
+ self.assertStatus(200)
+ user = self.jsonBody()
+ self.assertJsonBody({
+ 'username': 'user1',
+ 'name': 'My New Name',
+ 'email': 'mynew@email.com',
+ 'roles': ['block-manager'],
+ 'lastUpdate': user['lastUpdate'],
+ 'enabled': True,
+ 'pwdExpirationDate': None,
+ 'pwdUpdateRequired': False
+ })
+
+ self._delete('/api/user/user1')
+ self.assertStatus(204)
+
+ def test_crd_disabled_user(self):
+ self._create_user(username='klara',
+ password='mypassword10#',
+ name='Klara Musterfrau',
+ email='klara@musterfrau.com',
+ roles=['administrator'],
+ enabled=False)
+ self.assertStatus(201)
+ user = self.jsonBody()
+
+ # Restart dashboard module.
+ self._unload_module('dashboard')
+ self._load_module('dashboard')
+ time.sleep(10)
+
+ self._get('/api/user/klara')
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'username': 'klara',
+ 'name': 'Klara Musterfrau',
+ 'email': 'klara@musterfrau.com',
+ 'roles': ['administrator'],
+ 'lastUpdate': user['lastUpdate'],
+ 'enabled': False,
+ 'pwdExpirationDate': None,
+ 'pwdUpdateRequired': False
+ })
+
+ self._delete('/api/user/klara')
+ self.assertStatus(204)
+
+ def test_list_users(self):
+ self._get('/api/user')
+ self.assertStatus(200)
+ user = self.jsonBody()
+ self.assertEqual(len(user), 1)
+ user = user[0]
+ self.assertJsonBody([{
+ 'username': 'admin',
+ 'name': None,
+ 'email': None,
+ 'roles': ['administrator'],
+ 'lastUpdate': user['lastUpdate'],
+ 'enabled': True,
+ 'pwdExpirationDate': None,
+ 'pwdUpdateRequired': False
+ }])
+
+ def test_create_user_already_exists(self):
+ self._create_user(username='admin',
+ password='mypassword10#',
+ name='administrator',
+ email='my@email.com',
+ roles=['administrator'])
+ self.assertStatus(400)
+ self.assertError(code='username_already_exists',
+ component='user')
+
+ def test_create_user_invalid_role(self):
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['invalid-role'])
+ self.assertStatus(400)
+ self.assertError(code='role_does_not_exist',
+ component='user')
+
+ def test_create_user_invalid_chars_in_name(self):
+ self._create_user(username='userö',
+ password='mypassword10#',
+ name='administrator',
+ email='my@email.com',
+ roles=['administrator'])
+ self.assertStatus(400)
+ self.assertError(code='ceph_type_not_valid',
+ component='user')
+
+ def test_delete_user_does_not_exist(self):
+ self._delete('/api/user/user2')
+ self.assertStatus(404)
+
+ @DashboardTestCase.RunAs('test', 'test', [{'user': ['create', 'read', 'update', 'delete']}])
+ def test_delete_current_user(self):
+ self._delete('/api/user/test')
+ self.assertStatus(400)
+ self.assertError(code='cannot_delete_current_user',
+ component='user')
+
+ @DashboardTestCase.RunAs('test', 'test', [{'user': ['create', 'read', 'update', 'delete']}])
+ def test_disable_current_user(self):
+ self._put('/api/user/test', {'enabled': False})
+ self.assertStatus(400)
+ self.assertError(code='cannot_disable_current_user',
+ component='user')
+
+ def test_update_user_does_not_exist(self):
+ self._put('/api/user/user2', {'name': 'My New Name'})
+ self.assertStatus(404)
+
+ def test_update_user_invalid_role(self):
+ self._put('/api/user/admin', {'roles': ['invalid-role']})
+ self.assertStatus(400)
+ self.assertError(code='role_does_not_exist',
+ component='user')
+
+ def test_change_password_from_other_user(self):
+ self._post('/api/user/test2/change_password', {
+ 'old_password': 'abc',
+ 'new_password': 'xyz'
+ })
+ self.assertStatus(400)
+ self.assertError(code='invalid_user_context', component='user')
+
+ def test_change_password_old_not_match(self):
+ self._post('/api/user/admin/change_password', {
+ 'old_password': 'foo',
+ 'new_password': 'bar'
+ })
+ self.assertStatus(400)
+ self.assertError(code='invalid_old_password', component='user')
+
+ def test_change_password_as_old_password(self):
+ self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
+ self.login('test1', 'mypassword10#')
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'mypassword10#'
+ })
+ self.assertStatus(400)
+ self.assertError('password_policy_validation_failed', 'user',
+ 'Password must not be the same as the previous one.')
+ self._reset_login_to_admin('test1')
+
+ def test_change_password_contains_username(self):
+ self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
+ self.login('test1', 'mypassword10#')
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'mypasstest1@#'
+ })
+ self.assertStatus(400)
+ self.assertError('password_policy_validation_failed', 'user',
+ 'Password must not contain username.')
+ self._reset_login_to_admin('test1')
+
+ def test_change_password_contains_forbidden_words(self):
+ self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
+ self.login('test1', 'mypassword10#')
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'mypassOSD01'
+ })
+ self.assertStatus(400)
+ self.assertError('password_policy_validation_failed', 'user',
+ 'Password must not contain the keyword "OSD".')
+ self._reset_login_to_admin('test1')
+
+ def test_change_password_contains_sequential_characters(self):
+ self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
+ self.login('test1', 'mypassword10#')
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'mypass123456!@$'
+ })
+ self.assertStatus(400)
+ self.assertError('password_policy_validation_failed', 'user',
+ 'Password must not contain sequential characters.')
+ self._reset_login_to_admin('test1')
+
+ def test_change_password_contains_repetitive_characters(self):
+ self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
+ self.login('test1', 'mypassword10#')
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'aaaaA1@!#'
+ })
+ self.assertStatus(400)
+ self.assertError('password_policy_validation_failed', 'user',
+ 'Password must not contain repetitive characters.')
+ self._reset_login_to_admin('test1')
+
+ @DashboardTestCase.RunAs('test1', 'mypassword10#', ['read-only'], False)
+ def test_change_password(self):
+ self._post('/api/user/test1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'newpassword01#'
+ })
+ self.assertStatus(200)
+ self.logout()
+ self._post('/api/auth', {'username': 'test1', 'password': 'mypassword10#'})
+ self.assertStatus(400)
+ self.assertError(code='invalid_credentials', component='auth')
+
+ def test_create_user_password_cli(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
+ 'test1'],
+ 'mypassword10#',
+ return_exit_code=True)
+ self.assertEqual(exitcode, 0)
+ self.delete_user('test1')
+
+ @DashboardTestCase.RunAs('test2', 'foo_bar_10#', force_password=False, login=False)
+ def test_change_user_password_cli(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
+ 'test2'],
+ 'foo_new-password01#',
+ return_exit_code=True)
+ self.assertEqual(exitcode, 0)
+
+ def test_create_user_password_force_cli(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
+ '--force-password', 'test11'],
+ 'bar',
+ return_exit_code=True)
+ self.assertEqual(exitcode, 0)
+ self.delete_user('test11')
+
+ @DashboardTestCase.RunAs('test22', 'foo_bar_10#', force_password=False, login=False)
+ def test_change_user_password_force_cli(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
+ '--force-password', 'test22'],
+ 'bar',
+ return_exit_code=True)
+ self.assertEqual(exitcode, 0)
+
+ def test_create_user_password_cli_fail(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
+ 'test3'],
+ 'foo',
+ return_exit_code=True)
+ self.assertNotEqual(exitcode, 0)
+
+ @DashboardTestCase.RunAs('test4', 'x1z_tst+_10#', force_password=False, login=False)
+ def test_change_user_password_cli_fail(self):
+ exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
+ 'test4'],
+ 'bar',
+ return_exit_code=True)
+ self.assertNotEqual(exitcode, 0)
+
+ def test_create_user_with_pwd_expiration_date(self):
+ future_date = datetime.utcnow() + timedelta(days=10)
+ future_date = int(time.mktime(future_date.timetuple()))
+
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'],
+ pwd_expiration_date=future_date)
+ self.assertStatus(201)
+ user = self.jsonBody()
+
+ self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'username': 'user1',
+ 'name': 'My Name',
+ 'email': 'my@email.com',
+ 'roles': ['administrator'],
+ 'lastUpdate': user['lastUpdate'],
+ 'enabled': True,
+ 'pwdExpirationDate': future_date,
+ 'pwdUpdateRequired': False
+ })
+ self._delete('/api/user/user1')
+
+ def test_create_with_pwd_expiration_date_not_valid(self):
+ past_date = datetime.utcnow() - timedelta(days=10)
+ past_date = int(time.mktime(past_date.timetuple()))
+
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'],
+ pwd_expiration_date=past_date)
+ self.assertStatus(400)
+ self.assertError(code='pwd_past_expiration_date', component='user')
+
+ def test_create_with_default_expiration_date(self):
+ future_date_1 = datetime.utcnow() + timedelta(days=9)
+ future_date_1 = int(time.mktime(future_date_1.timetuple()))
+ future_date_2 = datetime.utcnow() + timedelta(days=11)
+ future_date_2 = int(time.mktime(future_date_2.timetuple()))
+
+ self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '10'])
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'])
+ self.assertStatus(201)
+
+ user = self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertIsNotNone(user['pwdExpirationDate'])
+ self.assertGreater(user['pwdExpirationDate'], future_date_1)
+ self.assertLess(user['pwdExpirationDate'], future_date_2)
+
+ self._delete('/api/user/user1')
+ self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '0'])
+
+ def test_pwd_expiration_date_update(self):
+ self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '10'])
+ self.create_user('user1', 'mypassword10#', ['administrator'])
+
+ user_1 = self._get('/api/user/user1')
+ self.assertStatus(200)
+
+ # Wait 1 s to ensure the new password expiration date differs from the old one
+ time.sleep(1)
+
+ self.login('user1', 'mypassword10#')
+ self._post('/api/user/user1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'newpassword01#'
+ })
+ self.assertStatus(200)
+
+ # Compare password expiration dates.
+ self._reset_login_to_admin()
+ user_1_pwd_changed = self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertLess(user_1['pwdExpirationDate'], user_1_pwd_changed['pwdExpirationDate'])
+
+ # Cleanup
+ self.delete_user('user1')
+ self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '0'])
+
+ def test_pwd_update_required(self):
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'],
+ pwd_update_required=True)
+ self.assertStatus(201)
+
+ user_1 = self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertEqual(user_1['pwdUpdateRequired'], True)
+
+ self.login('user1', 'mypassword10#')
+ self.assertStatus(201)
+
+ self._get('/api/osd')
+ self.assertStatus(403)
+ self._reset_login_to_admin('user1')
+
+ def test_pwd_update_required_change_pwd(self):
+ self._create_user(username='user1',
+ password='mypassword10#',
+ name='My Name',
+ email='my@email.com',
+ roles=['administrator'],
+ pwd_update_required=True)
+ self.assertStatus(201)
+
+ self.login('user1', 'mypassword10#')
+ self._post('/api/user/user1/change_password', {
+ 'old_password': 'mypassword10#',
+ 'new_password': 'newpassword01#'
+ })
+
+ self.login('user1', 'newpassword01#')
+ user_1 = self._get('/api/user/user1')
+ self.assertStatus(200)
+ self.assertEqual(user_1['pwdUpdateRequired'], False)
+ self._get('/api/osd')
+ self.assertStatus(200)
+ self._reset_login_to_admin('user1')
+
+ def test_validate_password_weak(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'mypassword1'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': True,
+ 'credits': 11,
+ 'valuation': 'Weak'
+ })
+
+ def test_validate_password_ok(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'mypassword1!@'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': True,
+ 'credits': 17,
+ 'valuation': 'OK'
+ })
+
+ def test_validate_password_strong(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'testpassword0047!@'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': True,
+ 'credits': 22,
+ 'valuation': 'Strong'
+ })
+
+ def test_validate_password_very_strong(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'testpassword#!$!@$'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': True,
+ 'credits': 30,
+ 'valuation': 'Very strong'
+ })
+
+ def test_validate_password_fail(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'foo'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': False,
+ 'credits': 0,
+ 'valuation': 'Password is too weak.'
+ })
+
+ def test_validate_password_fail_name(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'x1zhugo_10',
+ 'username': 'hugo'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': False,
+ 'credits': 0,
+ 'valuation': 'Password must not contain username.'
+ })
+
+ def test_validate_password_fail_oldpwd(self):
+ self._post('/api/user/validate_password', {
+ 'password': 'x1zt-st10',
+ 'old_password': 'x1zt-st10'
+ })
+ self.assertStatus(200)
+ self.assertJsonBody({
+ 'valid': False,
+ 'credits': 0,
+ 'valuation': 'Password must not be the same as the previous one.'
+ })
+
+ def test_create_user_pwd_update_required(self):
+ self.create_user('foo', 'bar', cmd_args=['--pwd_update_required'])
+ self._get('/api/user/foo')
+ self.assertStatus(200)
+ self.assertJsonSubset({
+ 'username': 'foo',
+ 'pwdUpdateRequired': True
+ })
+ self.delete_user('foo')
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
new file mode 100644
index 000000000..94a230c8d
--- /dev/null
+++ b/qa/tasks/mgr/mgr_test_case.py
@@ -0,0 +1,228 @@
+import json
+import logging
+
+from unittest import SkipTest
+
+from teuthology import misc
+from tasks.ceph_test_case import CephTestCase
+
+# TODO move definition of CephCluster away from the CephFS stuff
+from tasks.cephfs.filesystem import CephCluster
+
+
+log = logging.getLogger(__name__)
+
+
+class MgrCluster(CephCluster):
+ def __init__(self, ctx):
+ super(MgrCluster, self).__init__(ctx)
+ self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))
+
+ if len(self.mgr_ids) == 0:
+ raise RuntimeError(
+ "This task requires at least one manager daemon")
+
+ self.mgr_daemons = dict(
+ [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id)) for mgr_id
+ in self.mgr_ids])
+
+ def mgr_stop(self, mgr_id):
+ self.mgr_daemons[mgr_id].stop()
+
+ def mgr_fail(self, mgr_id):
+ self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)
+
+ def mgr_restart(self, mgr_id):
+ self.mgr_daemons[mgr_id].restart()
+
+ def get_mgr_map(self):
+ return json.loads(
+ self.mon_manager.raw_cluster_cmd("mgr", "dump", "--format=json-pretty"))
+
+ def get_registered_clients(self, name, mgr_map=None):
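+ # Return the address vector of the named client from the mgr map's active_clients, or None if not found.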
+ if mgr_map is None:
+ mgr_map = self.get_mgr_map()
+ for c in mgr_map['active_clients']:
+ if c['name'] == name:
+ return c['addrvec']
+ return None
+
+ def get_active_id(self):
+ return self.get_mgr_map()["active_name"]
+
+ def get_standby_ids(self):
+ return [s['name'] for s in self.get_mgr_map()["standbys"]]
+
+ def set_module_conf(self, module, key, val):
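+ # Set a module option cluster-wide ("config set mgr mgr/<module>/<key> <value>").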
+ self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
+ "mgr/{0}/{1}".format(
+ module, key
+ ), val)
+
+ def set_module_localized_conf(self, module, mgr_id, key, val, force):
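+ # Set a module option scoped to a single mgr daemon ("mgr/<module>/<mgr_id>/<key>"), optionally with --force.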
+ cmd = ["config", "set", "mgr",
+ "/".join(["mgr", module, mgr_id, key]),
+ val]
+ if force:
+ cmd.append("--force")
+ self.mon_manager.raw_cluster_cmd(*cmd)
+
+
+class MgrTestCase(CephTestCase):
+ MGRS_REQUIRED = 1
+
+ @classmethod
+ def setup_mgrs(cls):
+ # Stop all the daemons
+ for daemon in cls.mgr_cluster.mgr_daemons.values():
+ daemon.stop()
+
+ for mgr_id in cls.mgr_cluster.mgr_ids:
+ cls.mgr_cluster.mgr_fail(mgr_id)
+
+ # Unload all non-default plugins
+ loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "ls", "--format=json-pretty"))['enabled_modules']
+ unload_modules = set(loaded) - {"cephadm", "restful"}
+
+ for m in unload_modules:
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "disable", m)
+
+ # Start all the daemons
+ for daemon in cls.mgr_cluster.mgr_daemons.values():
+ daemon.restart()
+
+ # Wait for an active to come up
+ cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "",
+ timeout=20)
+
+ expect_standbys = set(cls.mgr_cluster.mgr_ids) \
+ - {cls.mgr_cluster.get_active_id()}
+ cls.wait_until_true(
+ lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys,
+ timeout=20)
+
+ @classmethod
+ def setUpClass(cls):
+ # The test runner should have populated this
+ assert cls.mgr_cluster is not None
+
+ if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
+ raise SkipTest(
+ "Only have {0} manager daemons, {1} are required".format(
+ len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
+
+ cls.setup_mgrs()
+
+ @classmethod
+ def _unload_module(cls, module_name):
+ def is_disabled():
+ enabled_modules = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'mgr', 'module', 'ls', "--format=json-pretty"))['enabled_modules']
+ return module_name not in enabled_modules
+
+ if is_disabled():
+ return
+
+ log.debug("Unloading Mgr module %s ...", module_name)
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name)
+ cls.wait_until_true(is_disabled, timeout=30)
+
+ @classmethod
+ def _load_module(cls, module_name):
+ loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "ls", "--format=json-pretty"))['enabled_modules']
+ if module_name in loaded:
+ # The enable command is idempotent, but our wait for a restart
+ # isn't, so let's return now if it's already loaded
+ return
+
+ initial_mgr_map = cls.mgr_cluster.get_mgr_map()
+
+ # Check if the module is configured as an always-on module
+ mgr_daemons = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "metadata"))
+
+ for daemon in mgr_daemons:
+ if daemon["name"] == initial_mgr_map["active_name"]:
+ ceph_version = daemon["ceph_release"]
+ always_on = initial_mgr_map["always_on_modules"].get(ceph_version, [])
+ if module_name in always_on:
+ return
+
+ log.debug("Loading Mgr module %s ...", module_name)
+ initial_gid = initial_mgr_map['active_gid']
+ cls.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "enable", module_name, "--force")
+
+ # Wait for the module to load
+ def has_restarted():
+ mgr_map = cls.mgr_cluster.get_mgr_map()
+ done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
+ if done:
+ log.debug("Restarted after module load (new active {0}/{1})".format(
+ mgr_map['active_name'], mgr_map['active_gid']))
+ return done
+ cls.wait_until_true(has_restarted, timeout=30)
+
+
+ @classmethod
+ def _get_uri(cls, service_name):
+ # Little dict hack so that I can assign into this from
+ # the _get_or_none closure below
+ mgr_map = {'x': None}
+
+ def _get_or_none():
+ mgr_map['x'] = cls.mgr_cluster.get_mgr_map()
+ result = mgr_map['x']['services'].get(service_name, None)
+ return result
+
+ cls.wait_until_true(lambda: _get_or_none() is not None, 30)
+
+ uri = mgr_map['x']['services'][service_name]
+
+ log.debug("Found {0} at {1} (daemon {2}/{3})".format(
+ service_name, uri, mgr_map['x']['active_name'],
+ mgr_map['x']['active_gid']))
+
+ return uri
+
+ @classmethod
+ def _assign_ports(cls, module_name, config_name, min_port=7789):
+ """
+ To avoid the need to run lots of hosts in teuthology tests to
+ get different URLs per mgr, we will hand out different ports
+ to each mgr here.
+
+ This is already taken care of for us when running in a vstart
+ environment.
+ """
+ # Start handing out ports well above Ceph's range.
+ assign_port = min_port
+
+ for mgr_id in cls.mgr_cluster.mgr_ids:
+ cls.mgr_cluster.mgr_stop(mgr_id)
+ cls.mgr_cluster.mgr_fail(mgr_id)
+
+ for mgr_id in cls.mgr_cluster.mgr_ids:
+ log.debug("Using port {0} for {1} on mgr.{2}".format(
+ assign_port, module_name, mgr_id
+ ))
+ cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
+ config_name,
+ str(assign_port),
+ force=True)
+ assign_port += 1
+
+ for mgr_id in cls.mgr_cluster.mgr_ids:
+ cls.mgr_cluster.mgr_restart(mgr_id)
+
+ def is_available():
+ mgr_map = cls.mgr_cluster.get_mgr_map()
+ done = mgr_map['available']
+ if done:
+ log.debug("Available after assign ports (new active {0}/{1})".format(
+ mgr_map['active_name'], mgr_map['active_gid']))
+ return done
+ cls.wait_until_true(is_available, timeout=30)
diff --git a/qa/tasks/mgr/test_cache.py b/qa/tasks/mgr/test_cache.py
new file mode 100644
index 000000000..71131cbc6
--- /dev/null
+++ b/qa/tasks/mgr/test_cache.py
@@ -0,0 +1,83 @@
+import json
+
+from .mgr_test_case import MgrTestCase
+
+class TestCache(MgrTestCase):
+
+ def setUp(self):
+ super(TestCache, self).setUp()
+ self.setup_mgrs()
+ self._load_module("cli_api")
+ self.ttl = 10
+ self.enable_cache(self.ttl)
+
+ def tearDown(self):
+ self.disable_cache()
+
+ def get_hit_miss_ratio(self):
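+ # Read the active mgr's perf counters and return (cache_hit, cache_miss) as integers.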
+ perf_dump_command = f"daemon mgr.{self.mgr_cluster.get_active_id()} perf dump"
+ perf_dump_res = self.cluster_cmd(perf_dump_command)
+ perf_dump = json.loads(perf_dump_res)
+ h = perf_dump["mgr"]["cache_hit"]
+ m = perf_dump["mgr"]["cache_miss"]
+ return int(h), int(m)
+
+ def enable_cache(self, ttl):
+ set_ttl = f"config set mgr mgr_ttl_cache_expire_seconds {ttl}"
+ self.cluster_cmd(set_ttl)
+
+ def disable_cache(self):
+ set_ttl = "config set mgr mgr_ttl_cache_expire_seconds 0"
+ self.cluster_cmd(set_ttl)
+
+
+ def test_init_cache(self):
+ get_ttl = "config get mgr mgr_ttl_cache_expire_seconds"
+ res = self.cluster_cmd(get_ttl)
+ self.assertEquals(int(res), 10)
+
+ def test_health_not_cached(self):
+ get_health = "mgr api get health"
+
+ h_start, m_start = self.get_hit_miss_ratio()
+ self.cluster_cmd(get_health)
+ h, m = self.get_hit_miss_ratio()
+
+ self.assertEquals(h, h_start)
+ self.assertEquals(m, m_start)
+
+ def test_osdmap(self):
+ get_osdmap = "mgr api get osd_map"
+
+ # store in cache
+ self.cluster_cmd(get_osdmap)
+ # get from cache
+ res = self.cluster_cmd(get_osdmap)
+ osd_map = json.loads(res)
+ self.assertIn("osds", osd_map)
+ self.assertGreater(len(osd_map["osds"]), 0)
+ self.assertIn("epoch", osd_map)
+
+ def test_hit_miss_ratio(self):
+ get_osdmap = "mgr api get osd_map"
+
+ hit_start, miss_start = self.get_hit_miss_ratio()
+
+ def wait_miss():
+ self.cluster_cmd(get_osdmap)
+ _, m = self.get_hit_miss_ratio()
+ return m == miss_start + 1
+
+ # Miss, add osd_map to cache
+ self.wait_until_true(wait_miss, self.ttl + 5)
+ h, m = self.get_hit_miss_ratio()
+ self.assertEquals(h, hit_start)
+ self.assertEquals(m, miss_start+1)
+
+ # Hit, get osd_map from cache
+ self.cluster_cmd(get_osdmap)
+ h, m = self.get_hit_miss_ratio()
+ self.assertEquals(h, hit_start+1)
+ self.assertEquals(m, miss_start+1)
diff --git a/qa/tasks/mgr/test_crash.py b/qa/tasks/mgr/test_crash.py
new file mode 100644
index 000000000..49191127f
--- /dev/null
+++ b/qa/tasks/mgr/test_crash.py
@@ -0,0 +1,108 @@
+import json
+import logging
+import datetime
+
+from .mgr_test_case import MgrTestCase
+
+
+log = logging.getLogger(__name__)
+UUID = 'd5775432-0742-44a3-a435-45095e32e6b1'
+DATEFMT = '%Y-%m-%d %H:%M:%S.%f'
+
+
+class TestCrash(MgrTestCase):
+
+ def setUp(self):
+ super(TestCrash, self).setUp()
+ self.setup_mgrs()
+ self._load_module('crash')
+
+ # Whip up some crash data
+ self.crashes = dict()
+ now = datetime.datetime.utcnow()
+
+ for i in (0, 1, 3, 4, 8):
+ timestamp = now - datetime.timedelta(days=i)
+ timestamp = timestamp.strftime(DATEFMT) + 'Z'
+ crash_id = '_'.join((timestamp, UUID)).replace(' ', '_')
+ self.crashes[crash_id] = {
+ 'crash_id': crash_id, 'timestamp': timestamp,
+ }
+
+ self.assertEqual(
+ 0,
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'post', '-i', '-',
+ stdin=json.dumps(self.crashes[crash_id]),
+ )
+ )
+
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'ls',
+ )
+ log.warning("setUp: crash ls returns %s" % retstr)
+
+ self.oldest_crashid = crash_id
+
+ def tearDown(self):
+ for crash in self.crashes.values():
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'rm', crash['crash_id']
+ )
+
+ def test_info(self):
+ for crash in self.crashes.values():
+ log.warning('test_info: crash %s' % crash)
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'ls'
+ )
+ log.warning('ls output: %s' % retstr)
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'info', crash['crash_id'],
+ )
+ log.warning('crash info output: %s' % retstr)
+ crashinfo = json.loads(retstr)
+ self.assertIn('crash_id', crashinfo)
+ self.assertIn('timestamp', crashinfo)
+
+ def test_ls(self):
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'ls',
+ )
+ for crash in self.crashes.values():
+ self.assertIn(crash['crash_id'], retstr)
+
+ def test_rm(self):
+ crashid = next(iter(self.crashes.keys()))
+ self.assertEqual(
+ 0,
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'rm', crashid,
+ )
+ )
+
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'ls',
+ )
+ self.assertNotIn(crashid, retstr)
+
+ def test_stat(self):
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'stat',
+ )
+ self.assertIn('5 crashes recorded', retstr)
+ self.assertIn('4 older than 1 days old:', retstr)
+ self.assertIn('3 older than 3 days old:', retstr)
+ self.assertIn('1 older than 7 days old:', retstr)
+
+ def test_prune(self):
+ self.assertEqual(
+ 0,
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'prune', '5'
+ )
+ )
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'crash', 'ls',
+ )
+ self.assertNotIn(self.oldest_crashid, retstr)
diff --git a/qa/tasks/mgr/test_dashboard.py b/qa/tasks/mgr/test_dashboard.py
new file mode 100644
index 000000000..c3459ec02
--- /dev/null
+++ b/qa/tasks/mgr/test_dashboard.py
@@ -0,0 +1,177 @@
+import logging
+import ssl
+
+import requests
+from requests.adapters import HTTPAdapter
+
+from .mgr_test_case import MgrTestCase
+
+log = logging.getLogger(__name__)
+
+
+class TestDashboard(MgrTestCase):
+ MGRS_REQUIRED = 3
+
+ def setUp(self):
+ super(TestDashboard, self).setUp()
+
+ self._assign_ports("dashboard", "ssl_server_port")
+ self._load_module("dashboard")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("dashboard",
+ "create-self-signed-cert")
+
+ def tearDown(self):
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
+ "mgr/dashboard/standby_behaviour",
+ "redirect")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
+ "mgr/dashboard/standby_error_status_code",
+ "500")
+
+ def wait_until_webserver_available(self, url):
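+ # Poll the URL (without following redirects or verifying TLS) until the web server responds.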
+ def _check_connection():
+ try:
+ requests.get(url, allow_redirects=False, verify=False)
+ return True
+ except requests.ConnectionError:
+ pass
+ return False
+ self.wait_until_true(_check_connection, timeout=30)
+
+ def test_standby(self):
+ # skip this test if mgr_standby_modules=false
+ if self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "config", "get", "mgr", "mgr_standby_modules").strip() == "false":
+ log.info("Skipping test_standby since mgr_standby_modules=false")
+ return
+
+ original_active_id = self.mgr_cluster.get_active_id()
+ original_uri = self._get_uri("dashboard")
+ log.info("Originally running manager '{}' at {}".format(
+ original_active_id, original_uri))
+
+ # Force a failover and wait until the previously active manager
+ # is listed as standby.
+ self.mgr_cluster.mgr_fail(original_active_id)
+ self.wait_until_true(
+ lambda: original_active_id in self.mgr_cluster.get_standby_ids(),
+ timeout=30)
+
+ failed_active_id = self.mgr_cluster.get_active_id()
+ failed_over_uri = self._get_uri("dashboard")
+ log.info("After failover running manager '{}' at {}".format(
+ failed_active_id, failed_over_uri))
+
+ self.assertNotEqual(original_uri, failed_over_uri)
+
+ # Wait until the web server of the standby node has settled.
+ self.wait_until_webserver_available(original_uri)
+
+ # The original active daemon should have come back up as a standby
+ # and be doing redirects to the new active daemon.
+ r = requests.get(original_uri, allow_redirects=False, verify=False)
+ self.assertEqual(r.status_code, 303)
+ self.assertEqual(r.headers['Location'], failed_over_uri)
+
+ # Ensure that every URL redirects to the active daemon.
+ r = requests.get("{}/runtime.js".format(original_uri.strip('/')),
+ allow_redirects=False,
+ verify=False)
+ self.assertEqual(r.status_code, 303)
+ self.assertEqual(r.headers['Location'], failed_over_uri)
+
+ def test_standby_disable_redirect(self):
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
+ "mgr/dashboard/standby_behaviour",
+ "error")
+
+ original_active_id = self.mgr_cluster.get_active_id()
+ original_uri = self._get_uri("dashboard")
+ log.info("Originally running manager '{}' at {}".format(
+ original_active_id, original_uri))
+
+ # Force a failover and wait until the previously active manager
+ # is listed as standby.
+ self.mgr_cluster.mgr_fail(original_active_id)
+ self.wait_until_true(
+ lambda: original_active_id in self.mgr_cluster.get_standby_ids(),
+ timeout=30)
+
+ failed_active_id = self.mgr_cluster.get_active_id()
+ failed_over_uri = self._get_uri("dashboard")
+ log.info("After failover running manager '{}' at {}".format(
+ failed_active_id, failed_over_uri))
+
+ self.assertNotEqual(original_uri, failed_over_uri)
+
+ # Wait until the web server of the standby node has settled.
+ self.wait_until_webserver_available(original_uri)
+
+ # Redirection should be disabled now, instead a 500 must be returned.
+ r = requests.get(original_uri, allow_redirects=False, verify=False)
+ self.assertEqual(r.status_code, 500)
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr",
+ "mgr/dashboard/standby_error_status_code",
+ "503")
+
+ # The customized HTTP status code (503) must be returned.
+ r = requests.get(original_uri, allow_redirects=False, verify=False)
+ self.assertEqual(r.status_code, 503)
+
+ def test_urls(self):
+ base_uri = self._get_uri("dashboard")
+
+ # This is a very simple smoke test to check that the dashboard can
+ # give us a 200 response to requests. We're not testing that
+ # the content is correct or even renders!
+
+ urls = [
+ "/",
+ ]
+
+ failures = []
+
+ for url in urls:
+ r = requests.get(base_uri + url, allow_redirects=False,
+ verify=False)
+ if r.status_code >= 300 and r.status_code < 400:
+ log.error("Unexpected redirect to: {0} (from {1})".format(
+ r.headers['Location'], base_uri))
+ if r.status_code != 200:
+ failures.append(url)
+
+ log.info("{0}: {1} ({2} bytes)".format(
+ url, r.status_code, len(r.content)
+ ))
+
+ self.assertListEqual(failures, [])
+
+ def test_tls(self):
+ class CustomHTTPAdapter(HTTPAdapter):
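+ # HTTPAdapter that pins the TLS protocol version used for the connection pool.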
+ def __init__(self, ssl_version):
+ self.ssl_version = ssl_version
+ super().__init__()
+
+ def init_poolmanager(self, *args, **kwargs):
+ kwargs['ssl_version'] = self.ssl_version
+ return super().init_poolmanager(*args, **kwargs)
+
+ uri = self._get_uri("dashboard")
+
+ # TLSv1
+ with self.assertRaises(requests.exceptions.SSLError):
+ session = requests.Session()
+ session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLSv1))
+ session.get(uri, allow_redirects=False, verify=False)
+
+ # TLSv1.1
+ with self.assertRaises(requests.exceptions.SSLError):
+ session = requests.Session()
+ session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLSv1_1))
+ session.get(uri, allow_redirects=False, verify=False)
+
+ session = requests.Session()
+ session.mount(uri, CustomHTTPAdapter(ssl.PROTOCOL_TLS))
+ r = session.get(uri, allow_redirects=False, verify=False)
+ self.assertEqual(r.status_code, 200)
diff --git a/qa/tasks/mgr/test_failover.py b/qa/tasks/mgr/test_failover.py
new file mode 100644
index 000000000..bfff11262
--- /dev/null
+++ b/qa/tasks/mgr/test_failover.py
@@ -0,0 +1,182 @@
+
+import logging
+import json
+
+from .mgr_test_case import MgrTestCase
+
+
+log = logging.getLogger(__name__)
+
+
+class TestFailover(MgrTestCase):
+ MGRS_REQUIRED = 2
+
+ def setUp(self):
+ super(TestFailover, self).setUp()
+ self.setup_mgrs()
+
+ def test_timeout(self):
+ """
+ That when an active mgr stops responding, a standby is promoted
+ after mon_mgr_beacon_grace.
+ """
+
+ # Query which mgr is active
+ original_active = self.mgr_cluster.get_active_id()
+ original_standbys = self.mgr_cluster.get_standby_ids()
+
+ # Stop that daemon
+ self.mgr_cluster.mgr_stop(original_active)
+
+ # Assert that the other mgr becomes active
+ self.wait_until_true(
+ lambda: self.mgr_cluster.get_active_id() in original_standbys,
+ timeout=60
+ )
+
+ self.mgr_cluster.mgr_restart(original_active)
+ self.wait_until_true(
+ lambda: original_active in self.mgr_cluster.get_standby_ids(),
+ timeout=10
+ )
+
+ def test_timeout_nostandby(self):
+ """
+ That when an active mgr stops responding, and no standby is
+ available, the active mgr is removed from the map anyway.
+ """
+ # Query which mgr is active
+ original_active = self.mgr_cluster.get_active_id()
+ original_standbys = self.mgr_cluster.get_standby_ids()
+
+ for s in original_standbys:
+ self.mgr_cluster.mgr_stop(s)
+ self.mgr_cluster.mgr_fail(s)
+
+ self.assertListEqual(self.mgr_cluster.get_standby_ids(), [])
+ self.assertEqual(self.mgr_cluster.get_active_id(), original_active)
+
+ grace = int(self.mgr_cluster.get_config("mon_mgr_beacon_grace"))
+ log.info("Should time out in about {0} seconds".format(grace))
+
+ self.mgr_cluster.mgr_stop(original_active)
+
+ # Now wait for the mon to notice the mgr is gone and remove it
+ # from the map.
+ self.wait_until_equal(
+ lambda: self.mgr_cluster.get_active_id(),
+ "",
+ timeout=grace * 2
+ )
+
+ self.assertListEqual(self.mgr_cluster.get_standby_ids(), [])
+ self.assertEqual(self.mgr_cluster.get_active_id(), "")
+
+ def test_explicit_fail(self):
+ """
+ That when a user explicitly fails a daemon, a standby immediately
+ replaces it.
+ :return:
+ """
+ # Query which mgr is active
+ original_active = self.mgr_cluster.get_active_id()
+ original_standbys = self.mgr_cluster.get_standby_ids()
+
+ self.mgr_cluster.mgr_fail(original_active)
+
+ # A standby should take over
+ self.wait_until_true(
+ lambda: self.mgr_cluster.get_active_id() in original_standbys,
+ timeout=60
+ )
+
+ # The one we failed should come back as a standby (it isn't
+ # really dead)
+ self.wait_until_true(
+ lambda: original_active in self.mgr_cluster.get_standby_ids(),
+ timeout=10
+ )
+
+ # Both daemons should have fully populated metadata
+ # (regression test for http://tracker.ceph.com/issues/21260)
+ meta = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "metadata"))
+ id_to_meta = dict([(i['name'], i) for i in meta])
+ for i in [original_active] + original_standbys:
+ self.assertIn(i, id_to_meta)
+ self.assertIn('ceph_version', id_to_meta[i])
+
+ # We should be able to fail back over again: this exercises
+ # our re-initialization of the python runtime within
+ # a single process lifetime.
+
+ # Get rid of any bystander standbys so that the original_active
+ # will be selected as next active.
+ new_active = self.mgr_cluster.get_active_id()
+ for daemon in original_standbys:
+ if daemon != new_active:
+ self.mgr_cluster.mgr_stop(daemon)
+ self.mgr_cluster.mgr_fail(daemon)
+
+ self.assertListEqual(self.mgr_cluster.get_standby_ids(),
+ [original_active])
+
+ self.mgr_cluster.mgr_stop(new_active)
+ self.mgr_cluster.mgr_fail(new_active)
+
+ self.assertEqual(self.mgr_cluster.get_active_id(), original_active)
+ self.assertEqual(self.mgr_cluster.get_standby_ids(), [])
+
+ def test_standby_timeout(self):
+ """
+ That when a standby daemon stops sending beacons, it is
+ removed from the list of standbys
+ :return:
+ """
+ original_active = self.mgr_cluster.get_active_id()
+ original_standbys = self.mgr_cluster.get_standby_ids()
+
+ victim = original_standbys[0]
+ self.mgr_cluster.mgr_stop(victim)
+
+ expect_standbys = set(original_standbys) - {victim}
+
+ self.wait_until_true(
+ lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys,
+ timeout=60
+ )
+ self.assertEqual(self.mgr_cluster.get_active_id(), original_active)
+
+class TestLibCephSQLiteFailover(MgrTestCase):
+ MGRS_REQUIRED = 1
+
+ def setUp(self):
+ super(TestLibCephSQLiteFailover, self).setUp()
+ self.setup_mgrs()
+
+ def get_libcephsqlite(self):
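+ """Return the single libcephsqlite client address registered in the mgr map."""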
+ mgr_map = self.mgr_cluster.get_mgr_map()
+ addresses = self.mgr_cluster.get_registered_clients('libcephsqlite', mgr_map=mgr_map)
+ self.assertEqual(len(addresses), 1)
+ return addresses[0]
+
+ def test_maybe_reconnect(self):
+ """
+ That the devicehealth module can recover after losing its libcephsqlite lock.
+ """
+
+ # make sure the database is populated and loaded by the module
+ self.mgr_cluster.mon_manager.ceph("device scrape-health-metrics")
+
+ oldaddr = self.get_libcephsqlite()
+ self.mgr_cluster.mon_manager.ceph(f"osd blocklist add {oldaddr['addr']}/{oldaddr['nonce']}")
+
+ def test():
+ self.mgr_cluster.mon_manager.ceph("device scrape-health-metrics")
+ newaddr = self.get_libcephsqlite()
+ return oldaddr != newaddr
+
+ self.wait_until_true(
+ test,
+ timeout=30
+ )
diff --git a/qa/tasks/mgr/test_insights.py b/qa/tasks/mgr/test_insights.py
new file mode 100644
index 000000000..aa2548881
--- /dev/null
+++ b/qa/tasks/mgr/test_insights.py
@@ -0,0 +1,192 @@
+import logging
+import json
+import datetime
+import time
+
+from .mgr_test_case import MgrTestCase
+
+
+log = logging.getLogger(__name__)
+UUID = 'd5775432-0742-44a3-a435-45095e32e6b2'
+DATEFMT = '%Y-%m-%d %H:%M:%S.%f'
+
+class TestInsights(MgrTestCase):
+ def setUp(self):
+ super(TestInsights, self).setUp()
+ self.setup_mgrs()
+ self._load_module("insights")
+ self._load_module("selftest")
+ self.crash_ids = []
+
+ def tearDown(self):
+ self._clear_crashes()
+
+ def _insights(self):
+ retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd("insights")
+ return json.loads(retstr)
+
+ def _add_crash(self, hours, make_invalid = False):
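+ """Post a synthetic crash report timestamped 'hours' in the past via
+ 'ceph crash post', optionally corrupting the timestamp to exercise
+ error handling."""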
+ now = datetime.datetime.utcnow()
+ timestamp = now - datetime.timedelta(hours = hours)
+ timestamp = timestamp.strftime(DATEFMT) + 'Z'
+ crash_id = '_'.join((timestamp, UUID)).replace(' ', '_')
+ crash = {
+ 'crash_id': crash_id,
+ 'timestamp': timestamp,
+ }
+ if make_invalid:
+ crash["timestamp"] = "not a timestamp"
+
+ ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'post', '-i', '-',
+ stdin=json.dumps(crash)
+ )
+ self.crash_ids.append(crash_id)
+ self.assertEqual(0, ret)
+
+ def _clear_crashes(self):
+ for crash_id in self.crash_ids:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ 'crash', 'rm', crash_id
+ )
+
+ def _wait_for_health_history_checks(self, *args):
+ """Wait for a set of health checks to appear in the health history"""
+ timeout = datetime.datetime.utcnow() + \
+ datetime.timedelta(seconds = 15)
+ while True:
+ report = self._insights()
+ missing = False
+ for check in args:
+ if check not in report["health"]["history"]["checks"]:
+ missing = True
+ break
+ if not missing:
+ return
+ self.assertGreater(timeout,
+ datetime.datetime.utcnow())
+ time.sleep(0.25)
+
+ def _wait_for_curr_health_cleared(self, check):
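+ """Wait until 'check' is no longer present in the current health checks."""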
+ timeout = datetime.datetime.utcnow() + \
+ datetime.timedelta(seconds = 15)
+ while True:
+ report = self._insights()
+ if check not in report["health"]["current"]["checks"]:
+ return
+ self.assertGreater(timeout,
+ datetime.datetime.utcnow())
+ time.sleep(0.25)
+
+ def test_health_history(self):
+ # use empty health history as starting point
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "insights", "prune-health", "0")
+ report = self._insights()
+ self.assertFalse(report["health"]["history"]["checks"])
+
+ # generate health check history entries. we want to avoid the edge case
+ # of running these tests at _exactly_ the top of the hour so we can
+ # explicitly control when hourly work occurs. for this we use the
+ # current time offset to a half hour.
+ now = datetime.datetime.utcnow()
+ now = datetime.datetime(
+ year = now.year,
+ month = now.month,
+ day = now.day,
+ hour = now.hour,
+ minute = 30)
+
+ check_names = set()
+ for hours in [-18, -11, -5, -1, 0]:
+ # change the insight module's perception of "now" ...
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "mgr", "self-test", "insights_set_now_offset", str(hours))
+
+ # ... to simulate health check arrivals in the past
+ unique_check_name = "insights_health_check_{}".format(hours)
+ health_check = {
+ unique_check_name: {
+ "severity": "warning",
+ "summary": "summary",
+ "detail": ["detail"]
+ }
+ }
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "mgr", "self-test", "health", "set",
+ json.dumps(health_check))
+
+ check_names.add(unique_check_name)
+
+ # and also set the same health check to test deduplication
+ dupe_check_name = "insights_health_check"
+ health_check = {
+ dupe_check_name: {
+ "severity": "warning",
+ "summary": "summary",
+ "detail": ["detail"]
+ }
+ }
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "mgr", "self-test", "health", "set",
+ json.dumps(health_check))
+
+ check_names.add(dupe_check_name)
+
+ # wait for the health check to show up in the history report
+ self._wait_for_health_history_checks(unique_check_name, dupe_check_name)
+
+ # clear out the current health checks before moving on
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "mgr", "self-test", "health", "clear")
+ self._wait_for_curr_health_cleared(unique_check_name)
+
+ report = self._insights()
+ for check in check_names:
+ self.assertIn(check, report["health"]["history"]["checks"])
+
+ # restart the manager
+ active_id = self.mgr_cluster.get_active_id()
+ self.mgr_cluster.mgr_restart(active_id)
+
+ # pruning really removes history
+ self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
+ "insights", "prune-health", "0")
+ report = self._insights()
+ self.assertFalse(report["health"]["history"]["checks"])
+
+ def test_schema(self):
+ """TODO: assert conformance to a full schema specification?"""
+ report = self._insights()
+ for key in ["osd_metadata",
+ "pg_summary",
+ "mon_status",
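+ # file_sd_config returns a Prometheus file-based service discovery
+ # list; its targets should cover the active mgr and every standby.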
+ "manager_map",
+ "service_map",
+ "mon_map",
+ "crush_map",
+ "fs_map",
+ "osd_tree",
+ "df",
+ "osd_dump",
+ "config",
+ "health",
+ "crashes",
+ "version",
+ "errors"]:
+ self.assertIn(key, report)
+
+ def test_crash_history(self):
+ self._clear_crashes()
+ report = self._insights()
+ self.assertFalse(report["crashes"]["summary"])
+ self.assertFalse(report["errors"])
+
+ # crashes show up in the report
+ self._add_crash(1)
+ report = self._insights()
+ self.assertTrue(report["crashes"]["summary"])
+ self.assertFalse(report["errors"])
+ log.warning("{}".format(json.dumps(report["crashes"], indent=2)))
+
+ self._clear_crashes()
diff --git a/qa/tasks/mgr/test_module_selftest.py b/qa/tasks/mgr/test_module_selftest.py
new file mode 100644
index 000000000..7ac296037
--- /dev/null
+++ b/qa/tasks/mgr/test_module_selftest.py
@@ -0,0 +1,254 @@
+
+import time
+import requests
+import errno
+import logging
+
+from teuthology.exceptions import CommandFailedError
+
+from .mgr_test_case import MgrTestCase
+
+
+log = logging.getLogger(__name__)
+
+
+class TestModuleSelftest(MgrTestCase):
+ """
+ That modules with a self-test command can be loaded and execute it
+ without errors.
+
+ This is not a substitute for really testing the modules, but it
+ is quick and is designed to catch regressions that could occur
+ if data structures change in a way that breaks how the modules
+ touch them.
+ """
+ MGRS_REQUIRED = 1
+
+ def setUp(self):
+ super(TestModuleSelftest, self).setUp()
+ self.setup_mgrs()
+
+ def _selftest_plugin(self, module_name):
+ self._load_module("selftest")
+ self._load_module(module_name)
+
+ # Execute the module's self_test() method
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "module", module_name)
+
+ def test_zabbix(self):
+ # Set these mandatory config fields so that the zabbix module
+ # won't trigger health/log errors on load/serve.
+ self.mgr_cluster.set_module_conf("zabbix", "zabbix_host", "localhost")
+ self.mgr_cluster.set_module_conf("zabbix", "identifier", "foo")
+ self._selftest_plugin("zabbix")
+
+ def test_prometheus(self):
+ self._assign_ports("prometheus", "server_port", min_port=8100)
+ self._selftest_plugin("prometheus")
+
+ def test_influx(self):
+ self._selftest_plugin("influx")
+
+ def test_diskprediction_local(self):
+ self._load_module("selftest")
+ python_version = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "python-version")
+ if tuple(int(v) for v in python_version.split('.')) == (3, 8):
+ # https://tracker.ceph.com/issues/45147
+ self.skipTest(f'python {python_version} not compatible with '
+ 'diskprediction_local')
+ self._selftest_plugin("diskprediction_local")
+
+ def test_telegraf(self):
+ self._selftest_plugin("telegraf")
+
+ def test_iostat(self):
+ self._selftest_plugin("iostat")
+
+ def test_devicehealth(self):
+ self._selftest_plugin("devicehealth")
+
+ def test_selftest_run(self):
+ self._load_module("selftest")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test", "run")
+
+ def test_telemetry(self):
+ self._selftest_plugin("telemetry")
+
+ def test_crash(self):
+ self._selftest_plugin("crash")
+
+ def test_orchestrator(self):
+ self._selftest_plugin("orchestrator")
+
+
+ def test_selftest_config_update(self):
+ """
+ That configuration updates are seen by running mgr modules
+ """
+ self._load_module("selftest")
+
+ def get_value():
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "config", "get", "testkey").strip()
+
+ self.assertEqual(get_value(), "None")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "config", "set", "mgr", "mgr/selftest/testkey", "foo")
+ self.wait_until_equal(get_value, "foo", timeout=10)
+
+ def get_localized_value():
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "config", "get_localized", "testkey").strip()
+
+ self.assertEqual(get_localized_value(), "foo")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "config", "set", "mgr", "mgr/selftest/{}/testkey".format(
+ self.mgr_cluster.get_active_id()),
+ "bar")
+ self.wait_until_equal(get_localized_value, "bar", timeout=10)
+
+
+ def test_selftest_command_spam(self):
+ # Use the selftest module to stress the mgr daemon
+ self._load_module("selftest")
+
+ # Use the dashboard to test that the mgr is still able to do its job
+ self._assign_ports("dashboard", "ssl_server_port")
+ self._load_module("dashboard")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("dashboard",
+ "create-self-signed-cert")
+
+ original_active = self.mgr_cluster.get_active_id()
+ original_standbys = self.mgr_cluster.get_standby_ids()
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test",
+ "background", "start",
+ "command_spam")
+
+ dashboard_uri = self._get_uri("dashboard")
+
+ delay = 10
+ periods = 10
+ for i in range(0, periods):
+ t1 = time.time()
+ # Check that an HTTP module remains responsive
+ r = requests.get(dashboard_uri, verify=False)
+ self.assertEqual(r.status_code, 200)
+
+ # Check that a native non-module command remains responsive
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("osd", "df")
+
+ time.sleep(delay - (time.time() - t1))
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test",
+ "background", "stop")
+
+ # Check that all mgr daemons are still running
+ self.assertEqual(original_active, self.mgr_cluster.get_active_id())
+ self.assertEqual(original_standbys, self.mgr_cluster.get_standby_ids())
+
+ def test_module_commands(self):
+ """
+ That module-handled commands have appropriate behavior on
+ disabled/failed/recently-enabled modules.
+ """
+
+ # Calling a command on a disabled module should return the proper
+ # error code.
+ self._load_module("selftest")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "disable", "selftest")
+ with self.assertRaises(CommandFailedError) as exc_raised:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "run")
+
+ self.assertEqual(exc_raised.exception.exitstatus, errno.EOPNOTSUPP)
+
+ # Calling a command that really doesn't exist should give me EINVAL.
+ with self.assertRaises(CommandFailedError) as exc_raised:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "osd", "albatross")
+
+ self.assertEqual(exc_raised.exception.exitstatus, errno.EINVAL)
+
+ # Enabling a module and then immediately using one of its commands
+ # should work (#21683)
+ self._load_module("selftest")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "config", "get", "testkey")
+
+ # Calling a command for a failed module should return the proper
+ # error code.
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "background", "start", "throw_exception")
+ with self.assertRaises(CommandFailedError) as exc_raised:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "run"
+ )
+ self.assertEqual(exc_raised.exception.exitstatus, errno.EIO)
+
+ # A health alert should be raised for a module that has thrown
+ # an exception from its serve() method
+ self.wait_for_health(
+ "Module 'selftest' has failed: Synthetic exception in serve",
+ timeout=30)
+ # prune the crash reports, so that the health report is back to
+ # clean
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "crash", "prune", "0")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "module", "disable", "selftest")
+
+ self.wait_for_health_clear(timeout=30)
+
+ def test_module_remote(self):
+ """
+ Use the selftest module to exercise inter-module communication
+ """
+ self._load_module("selftest")
+ # The "self-test remote" operation just happens to call into
+ # influx.
+ self._load_module("influx")
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "remote")
+
+ def test_selftest_cluster_log(self):
+ """
+ Use the selftest module to test the cluster/audit log interface.
+ """
+ priority_map = {
+ "info": "INF",
+ "security": "SEC",
+ "warning": "WRN",
+ "error": "ERR"
+ }
+ self._load_module("selftest")
+ for priority in priority_map.keys():
+ message = "foo bar {}".format(priority)
+ log_message = "[{}] {}".format(priority_map[priority], message)
+ # Check for cluster/audit logs:
+ # 2018-09-24 09:37:10.977858 mgr.x [INF] foo bar info
+ # 2018-09-24 09:37:10.977860 mgr.x [SEC] foo bar security
+ # 2018-09-24 09:37:10.977863 mgr.x [WRN] foo bar warning
+ # 2018-09-24 09:37:10.977866 mgr.x [ERR] foo bar error
+ with self.assert_cluster_log(log_message):
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "cluster-log", "cluster",
+ priority, message)
+ with self.assert_cluster_log(log_message, watch_channel="audit"):
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "cluster-log", "audit",
+ priority, message)
+
+ def test_selftest_cluster_log_unknown_channel(self):
+ """
+ Use the selftest module to test the cluster/audit log interface.
+ """
+ with self.assertRaises(CommandFailedError) as exc_raised:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "mgr", "self-test", "cluster-log", "xyz",
+ "ERR", "The channel does not exist")
+ self.assertEqual(exc_raised.exception.exitstatus, errno.EOPNOTSUPP)
diff --git a/qa/tasks/mgr/test_orchestrator_cli.py b/qa/tasks/mgr/test_orchestrator_cli.py
new file mode 100644
index 000000000..3fccef9a6
--- /dev/null
+++ b/qa/tasks/mgr/test_orchestrator_cli.py
@@ -0,0 +1,250 @@
+import errno
+import json
+import logging
+
+
+from .mgr_test_case import MgrTestCase
+
+
+log = logging.getLogger(__name__)
+
+
+class TestOrchestratorCli(MgrTestCase):
+ MGRS_REQUIRED = 1
+
+ def _cmd(self, module, *args):
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd(module, *args)
+
+ def _orch_cmd(self, *args):
+ return self._cmd("orch", *args)
+
+ def _progress_cmd(self, *args):
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", *args)
+
+ def _orch_cmd_result(self, *args, **kwargs):
+ """
+ raw_cluster_cmd doesn't support kwargs.
+ """
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd_result("orch", *args, **kwargs)
+
+ def _test_orchestrator_cmd_result(self, *args, **kwargs):
+ return self.mgr_cluster.mon_manager.raw_cluster_cmd_result("test_orchestrator", *args, **kwargs)
+
+ def setUp(self):
+ super(TestOrchestratorCli, self).setUp()
+
+ self._load_module("orchestrator")
+ self._load_module("test_orchestrator")
+ self._orch_cmd("set", "backend", "test_orchestrator")
+
+ def test_status(self):
+ ret = self._orch_cmd("status")
+ self.assertIn("test_orchestrator", ret)
+
+ def test_device_ls(self):
+ ret = self._orch_cmd("device", "ls")
+ self.assertIn("localhost", ret)
+
+ def test_device_ls_refresh(self):
+ ret = self._orch_cmd("device", "ls", "--refresh")
+ self.assertIn("localhost", ret)
+
+ def test_device_ls_hosts(self):
+ ret = self._orch_cmd("device", "ls", "localhost", "host1")
+ self.assertIn("localhost", ret)
+
+
+ def test_device_ls_json(self):
+ ret = self._orch_cmd("device", "ls", "--format", "json")
+ self.assertIn("localhost", ret)
+ self.assertIsInstance(json.loads(ret), list)
+
+ def test_ps(self):
+ ret = self._orch_cmd("ps")
+ self.assertIn("mgr", ret)
+
+ def test_ps_json(self):
+ ret = self._orch_cmd("ps", "--format", "json")
+ self.assertIsInstance(json.loads(ret), list)
+ self.assertIn("mgr", ret)
+
+
+ def test_service_action(self):
+ self._orch_cmd("restart", "mds.cephfs")
+ self._orch_cmd("stop", "mds.cephfs")
+ self._orch_cmd("start", "mds.cephfs")
+
+ def test_service_instance_action(self):
+ self._orch_cmd("daemon", "restart", "mds.a")
+ self._orch_cmd("daemon", "stop", "mds.a")
+ self._orch_cmd("daemon", "start", "mds.a")
+
+ def test_osd_create(self):
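+ # Apply an OSD service spec (drive group) via stdin; the test only
+ # checks that the spec is accepted (exit status 0).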
+ drive_group = """
+service_type: osd
+service_id: any.sda
+placement:
+ host_pattern: '*'
+data_devices:
+ all: True
+"""
+ res = self._orch_cmd_result("apply", "osd", "-i", "-",
+ stdin=drive_group)
+ self.assertEqual(res, 0)
+
+ def test_blink_device_light(self):
+ def _ls_lights(what):
+ return json.loads(self._cmd("device", "ls-lights"))[what]
+
+ metadata = json.loads(self._cmd("osd", "metadata"))
+ dev_name_ids = [osd["device_ids"] for osd in metadata]
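+ # Each device_ids entry looks like "<devname>=<device id>"; use the
+ # first one that splits cleanly into two parts.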
+ _, dev_id = [d.split('=') for d in dev_name_ids if len(d.split('=')) == 2][0]
+
+ for t in ["ident", "fault"]:
+ self.assertNotIn(dev_id, _ls_lights(t))
+ self._cmd("device", "light", "on", dev_id, t)
+ self.assertIn(dev_id, _ls_lights(t))
+
+ health = {
+ 'ident': 'DEVICE_IDENT_ON',
+ 'fault': 'DEVICE_FAULT_ON',
+ }[t]
+ self.wait_for_health(health, 30)
+
+ self._cmd("device", "light", "off", dev_id, t)
+ self.assertNotIn(dev_id, _ls_lights(t))
+
+ self.wait_for_health_clear(30)
+
+ def test_mds_add(self):
+ self._orch_cmd('daemon', 'add', 'mds', 'fsname')
+
+ def test_rgw_add(self):
+ self._orch_cmd('daemon', 'add', 'rgw', 'realm', 'zone')
+
+ def test_nfs_add(self):
+ self._orch_cmd('daemon', 'add', "nfs", "service_name")
+
+ def test_osd_rm(self):
+ self._orch_cmd('daemon', "rm", "osd.0", '--force')
+
+ def test_mds_rm(self):
+ self._orch_cmd("daemon", "rm", "mds.fsname")
+
+ def test_rgw_rm(self):
+ self._orch_cmd("daemon", "rm", "rgw.myrealm.myzone")
+
+ def test_nfs_rm(self):
+ self._orch_cmd("daemon", "rm", "nfs.service_name")
+
+ def test_host_ls(self):
+ out = self._orch_cmd("host", "ls", "--format=json")
+ hosts = json.loads(out)
+ self.assertEqual(len(hosts), 1)
+ self.assertEqual(hosts[0]["hostname"], "localhost")
+
+ def test_host_add(self):
+ self._orch_cmd("host", "add", "hostname")
+
+ def test_host_rm(self):
+ self._orch_cmd("host", "rm", "hostname")
+
+ def test_mon_update(self):
+ self._orch_cmd("apply", "mon", "3 host1:1.2.3.0/24 host2:1.2.3.0/24 host3:10.0.0.0/8")
+ self._orch_cmd("apply", "mon", "3 host1:1.2.3.4 host2:1.2.3.4 host3:10.0.0.1")
+
+ def test_mgr_update(self):
+ self._orch_cmd("apply", "mgr", "3")
+
+ def test_nfs_update(self):
+ self._orch_cmd("apply", "nfs", "service_name", "2")
+
+ def test_error(self):
+ ret = self._orch_cmd_result("host", "add", "raise_validation_error")
+ self.assertEqual(ret, errno.EINVAL)
+ ret = self._orch_cmd_result("host", "add", "raise_error")
+ self.assertEqual(ret, errno.EINVAL)
+ ret = self._orch_cmd_result("host", "add", "raise_bug")
+ self.assertEqual(ret, errno.EINVAL)
+ ret = self._orch_cmd_result("host", "add", "raise_not_implemented")
+ self.assertEqual(ret, errno.ENOENT)
+ ret = self._orch_cmd_result("host", "add", "raise_no_orchestrator")
+ self.assertEqual(ret, errno.ENOENT)
+ ret = self._orch_cmd_result("host", "add", "raise_import_error")
+ self.assertEqual(ret, errno.ENOENT)
+
+ def test_load_data(self):
+ data = {
+ 'inventory': [
+ {
+ 'name': 'host0',
+ 'devices': [
+ {
+ 'type': 'hdd',
+ 'id': '/dev/sda',
+ 'size': 1024**4 * 4,
+ 'rotates': True
+ }
+ ]
+ },
+ {
+ 'name': 'host1',
+ 'devices': [
+ {
+ 'type': 'hdd',
+ 'id': '/dev/sda',
+ 'size': 1024**4 * 4,
+ 'rotates': True
+ }
+ ]
+ }
+ ],
+ 'daemons': [
+ {
+ 'hostname': 'host0',
+ 'daemon_type': 'mon',
+ 'daemon_id': 'a'
+ },
+ {
+ 'hostname': 'host1',
+ 'daemon_type': 'osd',
+ 'daemon_id': '1'
+ }
+ ]
+ }
+
+ ret = self._test_orchestrator_cmd_result('load_data', '-i', '-', stdin=json.dumps(data))
+ self.assertEqual(ret, 0)
+ out = self._orch_cmd('device', 'ls', '--format=json')
+ inventory = data['inventory']
+ inventory_result = json.loads(out)
+ self.assertEqual(len(inventory), len(inventory_result))
+
+ out = self._orch_cmd('device', 'ls', 'host0', '--format=json')
+ inventory_result = json.loads(out)
+ self.assertEqual(len(inventory_result), 1)
+ self.assertEqual(inventory_result[0]['name'], 'host0')
+
+ out = self._orch_cmd('ps', '--format=json')
+ daemons = data['daemons']
+ daemons_result = json.loads(out)
+ self.assertEqual(len(daemons), len(daemons_result))
+
+ out = self._orch_cmd('ps', 'host0', '--format=json')
+ daemons_result = json.loads(out)
+ self.assertEqual(len(daemons_result), 1)
+ self.assertEqual(daemons_result[0]['hostname'], 'host0')
+
+ # test invalid input file: invalid json
+ json_str = '{ "inventory: '
+ ret = self._test_orchestrator_cmd_result('load_data', '-i', '-', stdin=json_str)
+ self.assertEqual(ret, errno.EINVAL)
+
+ # test invalid input file: missing key
+ json_str = '{ "inventory": [{"devices": []}] }'
+ ret = self._test_orchestrator_cmd_result('load_data', '-i', '-', stdin=json_str)
+ self.assertEqual(ret, errno.EINVAL)
+
+ # load empty data for other tests
+ ret = self._test_orchestrator_cmd_result('load_data', '-i', '-', stdin='{}')
+ self.assertEqual(ret, 0)
diff --git a/qa/tasks/mgr/test_progress.py b/qa/tasks/mgr/test_progress.py
new file mode 100644
index 000000000..a80600c6a
--- /dev/null
+++ b/qa/tasks/mgr/test_progress.py
@@ -0,0 +1,423 @@
+
+import json
+import logging
+import time
+from .mgr_test_case import MgrTestCase
+from contextlib import contextmanager
+
+log = logging.getLogger(__name__)
+
+
+class TestProgress(MgrTestCase):
+ POOL = "progress_data"
+
+ # How long we expect to wait at most between taking an OSD out
+ # and seeing the progress event pop up.
+ EVENT_CREATION_PERIOD = 60
+
+ WRITE_PERIOD = 30
+
+ # Generous period for OSD recovery, should be same order of magnitude
+ # to how long it took to write the data to begin with
+ RECOVERY_PERIOD = WRITE_PERIOD * 4
+
+ def _get_progress(self):
+ out = self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "json")
+ return json.loads(out)
+
+ def _all_events(self):
+ """
+ To avoid racing on completion, we almost always want to look
+ for events in the total list of active and complete, so
+ munge them into a single list.
+ """
+ p = self._get_progress()
+ log.info(json.dumps(p, indent=2))
+ return p['events'] + p['completed']
+
+ def _events_in_progress(self):
+ """
+ This function returns all events that are in progress.
+ """
+ p = self._get_progress()
+ log.info(json.dumps(p, indent=2))
+ return p['events']
+
+ def _completed_events(self):
+ """
+ This function returns all events that are completed
+ """
+ p = self._get_progress()
+ log.info(json.dumps(p, indent=2))
+ return p['completed']
+
+ def is_osd_marked_out(self, ev):
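+ # Progress event messages look like "Rebalancing after osd.N marked out"
+ # (or "... marked in"), so matching on the message suffix is sufficient.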
+ return ev['message'].endswith('marked out')
+
+ def is_osd_marked_in(self, ev):
+ return ev['message'].endswith('marked in')
+
+ def _get_osd_in_out_events(self, marked='both'):
+ """
+ Return the events that deal with OSDs being
+ marked in, out or both
+ """
+
+ marked_in_events = []
+ marked_out_events = []
+
+ events_in_progress = self._events_in_progress()
+ for ev in events_in_progress:
+ if self.is_osd_marked_out(ev):
+ marked_out_events.append(ev)
+ elif self.is_osd_marked_in(ev):
+ marked_in_events.append(ev)
+
+ if marked == 'both':
+ return [marked_in_events] + [marked_out_events]
+ elif marked == 'in':
+ return marked_in_events
+ else:
+ return marked_out_events
+
+ def _osd_in_out_events_count(self, marked='both'):
+ """
+ Count the number of ongoing recovery events that deal with
+ OSDs being marked in, out or both.
+ """
+ events_in_progress = self._events_in_progress()
+ marked_in_count = 0
+ marked_out_count = 0
+
+ for ev in events_in_progress:
+ if self.is_osd_marked_out(ev):
+ marked_out_count += 1
+ elif self.is_osd_marked_in(ev):
+ marked_in_count += 1
+
+ if marked == 'both':
+ return marked_in_count + marked_out_count
+ elif marked == 'in':
+ return marked_in_count
+ else:
+ return marked_out_count
+
+ def _setup_pool(self, size=None):
+ self.mgr_cluster.mon_manager.create_pool(self.POOL)
+ if size is not None:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'pool', 'set', self.POOL, 'size', str(size))
+
+ def _osd_in_out_completed_events_count(self, marked='both'):
+ """
+ Count the number of completed recovery events that deal with
+ OSDs being marked in, out, or both.
+ """
+
+ completed_events = self._completed_events()
+ marked_in_count = 0
+ marked_out_count = 0
+
+ for ev in completed_events:
+ if self.is_osd_marked_out(ev):
+ marked_out_count += 1
+ elif self.is_osd_marked_in(ev):
+ marked_in_count += 1
+
+ if marked == 'both':
+ return marked_in_count + marked_out_count
+ elif marked == 'in':
+ return marked_in_count
+ else:
+ return marked_out_count
+
+ def _write_some_data(self, t):
+ """
+ To adapt to test systems of varying performance, we write
+ data for a defined time period, rather than to a defined
+ capacity. This will hopefully result in a similar timescale
+ for PG recovery after an OSD failure.
+ """
+
+ args = [
+ "rados", "-p", self.POOL, "bench", str(t), "write", "-t", "16"]
+
+ self.mgr_cluster.admin_remote.run(args=args, wait=True)
+
+ def _osd_count(self):
+ osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json()
+ return len(osd_map['osds'])
+
+ @contextmanager
+ def recovery_backfill_disabled(self):
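+ """Set the nobackfill/norecover flags for the duration of the block,
+ then clear them, so progress events can be observed before any
+ recovery actually starts."""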
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'set', 'nobackfill')
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'set', 'norecover')
+ yield
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'unset', 'nobackfill')
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'unset', 'norecover')
+
+ def setUp(self):
+ super(TestProgress, self).setUp()
+ # Ensure we have at least four OSDs
+ if self._osd_count() < 4:
+ self.skipTest("Not enough OSDs!")
+
+ # Remove any filesystems so that we can remove their pools
+ if self.mds_cluster:
+ self.mds_cluster.mds_stop()
+ self.mds_cluster.mds_fail()
+ self.mds_cluster.delete_all_filesystems()
+
+ # Remove all other pools
+ for pool in self.mgr_cluster.mon_manager.get_osd_dump_json()['pools']:
+ self.mgr_cluster.mon_manager.remove_pool(pool['pool_name'])
+
+ self._load_module("progress")
+ self.mgr_cluster.mon_manager.raw_cluster_cmd('progress', 'clear')
+
+ def _simulate_failure(self, osd_ids=None):
+ """
+ Common lead-in to several tests: get some data in the cluster,
+ then mark an OSD out to trigger the start of a progress event.
+
+ Return the JSON representation of the failure event.
+ """
+
+ if osd_ids is None:
+ osd_ids = [0]
+
+ self._setup_pool()
+ self._write_some_data(self.WRITE_PERIOD)
+ with self.recovery_backfill_disabled():
+ for osd_id in osd_ids:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'out', str(osd_id))
+
+ # Wait for a progress event to pop up
+ self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1,
+ timeout=self.EVENT_CREATION_PERIOD,
+ period=1)
+
+ ev = self._get_osd_in_out_events('out')[0]
+ log.info(json.dumps(ev, indent=1))
+ self.assertIn("Rebalancing after osd.0 marked out", ev['message'])
+ return ev
+
+ def _simulate_back_in(self, osd_ids, initial_event):
+ for osd_id in osd_ids:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'in', str(osd_id))
+
+ # First Event should complete promptly
+ self.wait_until_true(lambda: self._is_complete(initial_event['id']),
+ timeout=self.RECOVERY_PERIOD)
+
+ with self.recovery_backfill_disabled():
+
+ try:
+ # Wait for progress event marked in to pop up
+ self.wait_until_equal(lambda: self._osd_in_out_events_count('in'), 1,
+ timeout=self.EVENT_CREATION_PERIOD,
+ period=1)
+ except RuntimeError as ex:
+ if "Timed out after" not in str(ex):
+ raise ex
+
+ log.info("There were no PGs affected by the osd being marked in")
+ return None
+
+ new_event = self._get_osd_in_out_events('in')[0]
+ return new_event
+
+ def _no_events_anywhere(self):
+ """
+ Whether there are any live or completed events
+ """
+ p = self._get_progress()
+ total_events = len(p['events']) + len(p['completed'])
+ return total_events == 0
+
+ def _is_quiet(self):
+ """
+ Whether any progress events are live.
+ """
+ return len(self._get_progress()['events']) == 0
+
+ def _is_complete(self, ev_id):
+ progress = self._get_progress()
+ live_ids = [ev['id'] for ev in progress['events']]
+ complete_ids = [ev['id'] for ev in progress['completed']]
+ if ev_id in complete_ids:
+ assert ev_id not in live_ids
+ return True
+ else:
+ assert ev_id in live_ids
+ return False
+
+ def _is_inprogress_or_complete(self, ev_id):
+ for ev in self._events_in_progress():
+ if ev['id'] == ev_id:
+ return ev['progress'] > 0
+ # check if the event completed
+ return self._is_complete(ev_id)
+
+ def tearDown(self):
+ if self.POOL in self.mgr_cluster.mon_manager.pools:
+ self.mgr_cluster.mon_manager.remove_pool(self.POOL)
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'unset', 'nobackfill')
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'unset', 'norecover')
+
+ osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json()
+ for osd in osd_map['osds']:
+ if osd['weight'] == 0.0:
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'in', str(osd['osd']))
+
+ # Unset allow_pg_recovery_event in case it's set to true
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'config', 'set', 'mgr',
+ 'mgr/progress/allow_pg_recovery_event', 'false')
+
+ super(TestProgress, self).tearDown()
+
+ def test_osd_healthy_recovery(self):
+ """
+ The simple recovery case: an OSD goes down, its PGs get a new
+ placement, and we wait for the PG to get healthy in its new
+ locations.
+ """
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'config', 'set', 'mgr',
+ 'mgr/progress/allow_pg_recovery_event', 'true')
+
+ ev = self._simulate_failure()
+
+ # Wait for progress event to ultimately reach completion
+ self.wait_until_true(lambda: self._is_complete(ev['id']),
+ timeout=self.RECOVERY_PERIOD)
+ self.assertEqual(self._osd_in_out_events_count(), 0)
+
+ def test_pool_removal(self):
+ """
+ That a pool removed during OSD recovery causes the
+ progress event to be correctly marked complete once there
+ is no more data to move.
+ """
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'config', 'set', 'mgr',
+ 'mgr/progress/allow_pg_recovery_event', 'true')
+
+ ev = self._simulate_failure()
+
+ self.mgr_cluster.mon_manager.remove_pool(self.POOL)
+
+ # Event should complete promptly
+ self.wait_until_true(lambda: self._is_complete(ev['id']),
+ timeout=self.RECOVERY_PERIOD)
+ self.assertEqual(self._osd_in_out_events_count(), 0)
+
+ def test_osd_came_back(self):
+ """
+ When a recovery is underway but the out OSD comes back in,
+ recovery is no longer necessary. A new event should be created
+ for the OSD being marked in, and the one still in progress
+ should be cancelled.
+ """
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'config', 'set', 'mgr',
+ 'mgr/progress/allow_pg_recovery_event', 'true')
+
+ ev1 = self._simulate_failure()
+
+ ev2 = self._simulate_back_in([0], ev1)
+
+ if ev2 is not None:
+ # Wait for progress event to ultimately complete
+ self.wait_until_true(lambda: self._is_complete(ev2['id']),
+ timeout=self.RECOVERY_PERIOD)
+
+ self.assertEqual(self._osd_in_out_events_count(), 0)
+
+ def test_turn_off_module(self):
+ """
+ When the module is turned off, there should not be any
+ ongoing or completed events. The module should also not accept
+ remote events coming in from other modules; however, once it is
+ turned back on, creating an event should work as before.
+ """
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'config', 'set', 'mgr',
+ 'mgr/progress/allow_pg_recovery_event', 'true')
+
+ pool_size = 3
+ self._setup_pool(size=pool_size)
+ self._write_some_data(self.WRITE_PERIOD)
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "off")
+
+ with self.recovery_backfill_disabled():
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'out', '0')
+
+ time.sleep(self.EVENT_CREATION_PERIOD/2)
+
+ with self.recovery_backfill_disabled():
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'in', '0')
+
+ time.sleep(self.EVENT_CREATION_PERIOD/2)
+
+ self.assertTrue(self._no_events_anywhere())
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "on")
+
+ self._write_some_data(self.WRITE_PERIOD)
+
+ with self.recovery_backfill_disabled():
+
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'out', '0')
+
+ # Wait for a progress event to pop up
+ self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1,
+ timeout=self.EVENT_CREATION_PERIOD,
+ period=1)
+
+ ev1 = self._get_osd_in_out_events('out')[0]
+
+ log.info(json.dumps(ev1, indent=1))
+
+ self.wait_until_true(lambda: self._is_complete(ev1['id']),
+ check_fn=lambda: self._is_inprogress_or_complete(ev1['id']),
+ timeout=self.RECOVERY_PERIOD)
+ self.assertTrue(self._is_quiet())
+
+ def test_default_progress_test(self):
+ """
+ The progress module disables PG recovery events by default;
+ verify that this is indeed the case.
+ """
+ pool_size = 3
+ self._setup_pool(size=pool_size)
+ self._write_some_data(self.WRITE_PERIOD)
+
+ with self.recovery_backfill_disabled():
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'out', '0')
+
+ time.sleep(self.EVENT_CREATION_PERIOD/2)
+
+ with self.recovery_backfill_disabled():
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'in', '0')
+
+ time.sleep(self.EVENT_CREATION_PERIOD/2)
+
+ self.assertEqual(self._osd_in_out_events_count(), 0)
diff --git a/qa/tasks/mgr/test_prometheus.py b/qa/tasks/mgr/test_prometheus.py
new file mode 100644
index 000000000..376556ab3
--- /dev/null
+++ b/qa/tasks/mgr/test_prometheus.py
@@ -0,0 +1,79 @@
+import json
+import logging
+import requests
+
+from .mgr_test_case import MgrTestCase
+
+log = logging.getLogger(__name__)
+
+
+class TestPrometheus(MgrTestCase):
+ MGRS_REQUIRED = 3
+
+ def setUp(self):
+ super(TestPrometheus, self).setUp()
+ self.setup_mgrs()
+
+ def test_file_sd_command(self):
+ self._assign_ports("prometheus", "server_port")
+ self._load_module("prometheus")
+
+ result = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ "prometheus", "file_sd_config"))
+ mgr_map = self.mgr_cluster.get_mgr_map()
+ self.assertEqual(len(result[0]['targets']), len(mgr_map['standbys']) + 1)
+
+
+
+ def test_standby(self):
+ self._assign_ports("prometheus", "server_port")
+ self._load_module("prometheus")
+
+ original_active = self.mgr_cluster.get_active_id()
+
+ original_uri = self._get_uri("prometheus")
+ log.info("Originally running at {0}".format(original_uri))
+
+ self.mgr_cluster.mgr_fail(original_active)
+
+ failed_over_uri = self._get_uri("prometheus")
+ log.info("After failover running at {0}".format(failed_over_uri))
+
+ self.assertNotEqual(original_uri, failed_over_uri)
+
+ # The original active daemon should have come back up as a standby
+ # and serve some html under "/" and an empty answer under /metrics
+ r = requests.get(original_uri, allow_redirects=False)
+ self.assertEqual(r.status_code, 200)
+ r = requests.get(original_uri + "metrics", allow_redirects=False)
+ self.assertEqual(r.status_code, 200)
+ self.assertEqual(r.headers["content-type"], "text/plain;charset=utf-8")
+ self.assertEqual(r.headers["server"], "Ceph-Prometheus")
+
+ def test_urls(self):
+ self._assign_ports("prometheus", "server_port")
+ self._load_module("prometheus")
+
+ base_uri = self._get_uri("prometheus")
+
+ # This is a very simple smoke test to check that the module can
+ # give us a 200 response to requests. We're not testing that
+ # the content is correct or even renders!
+
+ urls = [
+ "/",
+ "/metrics"
+ ]
+
+ failures = []
+
+ for url in urls:
+ r = requests.get(base_uri + url, allow_redirects=False)
+ if r.status_code != 200:
+ failures.append(url)
+
+ log.info("{0}: {1} ({2} bytes)".format(
+ url, r.status_code, len(r.content)
+ ))
+
+ self.assertListEqual(failures, [])