path: root/src/pybind/mgr/dashboard/controllers
Diffstat (limited to 'src/pybind/mgr/dashboard/controllers')
-rw-r--r--  src/pybind/mgr/dashboard/controllers/_crud.py        14
-rw-r--r--  src/pybind/mgr/dashboard/controllers/ceph_users.py     3
-rw-r--r--  src/pybind/mgr/dashboard/controllers/cephfs.py       422
-rw-r--r--  src/pybind/mgr/dashboard/controllers/osd.py           15
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rgw.py          143
5 files changed, 555 insertions, 42 deletions
diff --git a/src/pybind/mgr/dashboard/controllers/_crud.py b/src/pybind/mgr/dashboard/controllers/_crud.py
index 4a57ac06c..100e5fe4b 100644
--- a/src/pybind/mgr/dashboard/controllers/_crud.py
+++ b/src/pybind/mgr/dashboard/controllers/_crud.py
@@ -104,6 +104,7 @@ class Validator(Enum):
RGW_ROLE_NAME = 'rgwRoleName'
RGW_ROLE_PATH = 'rgwRolePath'
FILE = 'file'
+ RGW_ROLE_SESSION_DURATION = 'rgwRoleSessionDuration'
class FormField(NamedTuple):
@@ -224,6 +225,10 @@ class Container:
properties[field.key]['title'] = field.name
field_ui_schema['key'] = field_key
field_ui_schema['readonly'] = field.readonly
+ if field.readonly:
+ field_ui_schema['templateOptions'] = {
+ 'disabled': True
+ }
field_ui_schema['help'] = f'{field.help}'
field_ui_schema['validators'] = [i.value for i in field.validators]
items.append(field_ui_schema)
@@ -307,6 +312,7 @@ class CRUDMeta(SerializableClass):
self.forms = []
self.columnKey = ''
self.detail_columns = []
+ self.resource = ''
class CRUDCollectionMethod(NamedTuple):
@@ -330,6 +336,7 @@ class CRUDEndpoint:
actions: Optional[List[TableAction]] = None,
permissions: Optional[List[str]] = None, forms: Optional[List[Form]] = None,
column_key: Optional[str] = None,
+ resource: Optional[str] = None,
meta: CRUDMeta = CRUDMeta(), get_all: Optional[CRUDCollectionMethod] = None,
create: Optional[CRUDCollectionMethod] = None,
delete: Optional[CRUDCollectionMethod] = None,
@@ -352,6 +359,7 @@ class CRUDEndpoint:
self.detail_columns = detail_columns if detail_columns is not None else []
self.extra_endpoints = extra_endpoints if extra_endpoints is not None else []
self.selection_type = selection_type
+ self.resource = resource
def __call__(self, cls: Any):
self.create_crud_class(cls)
@@ -415,6 +423,7 @@ class CRUDEndpoint:
self.generate_forms(model_key)
self.set_permissions()
self.set_column_key()
+ self.set_table_resource()
self.get_detail_columns()
selection_type = self.__class__.outer_self.selection_type
self.__class__.outer_self.meta.table.set_selection_type(selection_type)
@@ -468,6 +477,10 @@ class CRUDEndpoint:
if self.__class__.outer_self.column_key:
self.outer_self.meta.columnKey = self.__class__.outer_self.column_key
+ def set_table_resource(self):
+ if self.__class__.outer_self.resource:
+ self.outer_self.meta.resource = self.__class__.outer_self.resource
+
class_name = self.router.path.replace('/', '')
meta_class = type(f'{class_name}_CRUDClassMetadata',
(RESTController,),
@@ -478,6 +491,7 @@ class CRUDEndpoint:
'generate_forms': generate_forms,
'set_permissions': set_permissions,
'set_column_key': set_column_key,
+ 'set_table_resource': set_table_resource,
'get_detail_columns': get_detail_columns,
'outer_self': self,
})
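
Editor's note: taken together, the _crud.py changes thread a new optional 'resource' label from the @CRUDEndpoint decorator into the serialized CRUDMeta that the UI consumes, exactly like the existing column_key handling. A minimal standalone sketch of that pattern, using simplified stand-in classes rather than the dashboard's actual ones:

from typing import Optional


class Meta:
    """Toy stand-in for CRUDMeta: plain attributes that get serialized."""
    def __init__(self) -> None:
        self.columnKey = ''
        self.resource = ''   # the field added by this patch


class Endpoint:
    """Toy stand-in for CRUDEndpoint: stores decorator args, copies them into meta."""
    def __init__(self, column_key: Optional[str] = None,
                 resource: Optional[str] = None) -> None:
        self.column_key = column_key
        self.resource = resource
        self.meta = Meta()

    def set_table_resource(self) -> None:
        # Mirrors the new set_table_resource(): copy only when a value was given.
        if self.resource:
            self.meta.resource = self.resource


endpoint = Endpoint(column_key='entity', resource='user')
endpoint.set_table_resource()
print(endpoint.meta.resource)  # -> 'user'
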
diff --git a/src/pybind/mgr/dashboard/controllers/ceph_users.py b/src/pybind/mgr/dashboard/controllers/ceph_users.py
index e1bdc1570..022f8f36c 100644
--- a/src/pybind/mgr/dashboard/controllers/ceph_users.py
+++ b/src/pybind/mgr/dashboard/controllers/ceph_users.py
@@ -174,7 +174,7 @@ edit_form = Form(path='/cluster/user/edit',
TableAction(name='Create', permission='create', icon=Icon.ADD.value,
routerLink='/cluster/user/create'),
TableAction(name='Edit', permission='update', icon=Icon.EDIT.value,
- click='edit'),
+ click='edit', routerLink='/cluster/user/edit'),
TableAction(name='Delete', permission='delete', icon=Icon.DESTROY.value,
click='delete', disable=True),
TableAction(name='Import', permission='create', icon=Icon.IMPORT.value,
@@ -185,6 +185,7 @@ edit_form = Form(path='/cluster/user/edit',
permissions=[Scope.CONFIG_OPT],
forms=[create_form, edit_form, import_user_form],
column_key='entity',
+ resource='user',
get_all=CRUDCollectionMethod(
func=CephUserEndpoints.user_list,
doc=EndpointDoc("Get Ceph Users")
diff --git a/src/pybind/mgr/dashboard/controllers/cephfs.py b/src/pybind/mgr/dashboard/controllers/cephfs.py
index 09b2bebfc..fcd87833f 100644
--- a/src/pybind/mgr/dashboard/controllers/cephfs.py
+++ b/src/pybind/mgr/dashboard/controllers/cephfs.py
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+import errno
import json
import logging
import os
from collections import defaultdict
-from typing import Any, Dict
+from typing import Any, Dict, List
import cephfs
import cherrypy
@@ -22,6 +24,11 @@ GET_QUOTAS_SCHEMA = {
'max_bytes': (int, ''),
'max_files': (int, '')
}
+GET_STATFS_SCHEMA = {
+ 'bytes': (int, ''),
+ 'files': (int, ''),
+ 'subdirs': (int, '')
+}
logger = logging.getLogger("controllers.rgw")
@@ -49,7 +56,7 @@ class CephFS(RESTController):
service_spec_str = service_spec_str[:-1]
if 'hosts' in service_spec['placement']:
for host in service_spec['placement']['hosts']:
- service_spec_str += f'{host},'
+ service_spec_str += f'{host} '
service_spec_str = service_spec_str[:-1]
error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_create', None,
@@ -94,6 +101,29 @@ class CephFS(RESTController):
component='cephfs')
return f'Volume {name} renamed successfully to {new_name}'
+ @UpdatePermission
+ @Endpoint('PUT')
+ @EndpointDoc("Set Ceph authentication capabilities for the specified user ID in the given path",
+ parameters={
+ 'fs_name': (str, 'File system name'),
+ 'client_id': (str, 'Cephx user ID'),
+ 'caps': (str, 'Path and given capabilities'),
+                     'root_squash': (str, 'Whether to apply root_squash'),
+
+ })
+ def auth(self, fs_name: str, client_id: int, caps: List[str], root_squash: bool):
+ if root_squash:
+ caps.insert(2, 'root_squash')
+ error_code, _, err = mgr.mon_command({'prefix': 'fs authorize',
+ 'filesystem': fs_name,
+ 'entity': client_id,
+ 'caps': caps})
+ if error_code != 0:
+ raise DashboardException(
+ msg=f'Error setting authorization for {client_id} with {caps}: {err}',
+ component='cephfs')
+ return f'Updated {client_id} authorization successfully'
+
def get(self, fs_id):
fs_id = self.fs_id_to_int(fs_id)
return self.fs_status(fs_id)
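
Editor's note: the new auth() handler simply forwards to the 'fs authorize' mon command. A hedged sketch of the payload it assembles; the dict keys come from the handler above, while the entity format and the path/permission pair in caps are assumptions for illustration:

fs_name = 'cephfs'
client_id = 'client.foo'   # assumed entity format
caps = ['/', 'rw']         # assumed path and permission pair
root_squash = True

if root_squash:
    # insert(2, ...) appends when caps holds exactly one path/permission pair
    caps.insert(2, 'root_squash')

payload = {
    'prefix': 'fs authorize',
    'filesystem': fs_name,
    'entity': client_id,
    'caps': caps,
}
print(payload)
# {'prefix': 'fs authorize', 'filesystem': 'cephfs',
#  'entity': 'client.foo', 'caps': ['/', 'rw', 'root_squash']}
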
@@ -330,13 +360,16 @@ class CephFS(RESTController):
standby_table = self.get_standby_table(fsmap['standbys'], mds_versions)
+ flags = mdsmap['flags_state']
+
return {
"cephfs": {
"id": fs_id,
"name": mdsmap['fs_name'],
"client_count": client_count,
"ranks": rank_table,
- "pools": pools_table
+ "pools": pools_table,
+ "flags": flags,
},
"standbys": standby_table,
"versions": mds_versions
@@ -359,7 +392,7 @@ class CephFS(RESTController):
"No cephfs with id {0}".format(fs_id))
# Decorate the metadata with some fields that will be
- # indepdendent of whether it's a kernel or userspace
+ # independent of whether it's a kernel or userspace
# client, so that the javascript doesn't have to grok that.
for client in clients:
if "ceph_version" in client['client_metadata']: # pragma: no cover - no complexity
@@ -518,6 +551,47 @@ class CephFS(RESTController):
cfs = self._cephfs_instance(fs_id)
return cfs.get_quotas(path)
+ @RESTController.Resource('POST', path='/write_to_file')
+ @allow_empty_body
+ def write_to_file(self, fs_id, path, buf) -> None:
+ """
+ Write some data to the specified path.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the file to write.
+        :param buf: The string to write to the file.
+ """
+ cfs = self._cephfs_instance(fs_id)
+ cfs.write_to_file(path, buf)
+
+ @RESTController.Resource('DELETE', path='/unlink')
+ def unlink(self, fs_id, path) -> None:
+ """
+ Removes a file, link, or symbolic link.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the file or link to unlink.
+ """
+ cfs = self._cephfs_instance(fs_id)
+ cfs.unlink(path)
+
+ @RESTController.Resource('GET', path='/statfs')
+ @EndpointDoc("Get Cephfs statfs of the specified path",
+ parameters={
+ 'fs_id': (str, 'File System Identifier'),
+ 'path': (str, 'File System Path'),
+ },
+ responses={200: GET_STATFS_SCHEMA})
+ def statfs(self, fs_id, path) -> dict:
+ """
+ Get the statfs of the specified path.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the directory/file.
+ :return: Returns a dictionary containing 'bytes',
+ 'files' and 'subdirs'.
+ :rtype: dict
+ """
+ cfs = self._cephfs_instance(fs_id)
+ return cfs.statfs(path)
+
@RESTController.Resource('POST', path='/snapshot')
@allow_empty_body
def snapshot(self, fs_id, path, name=None):
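
Editor's note: the three new resources (write_to_file, unlink, statfs) hang off the existing per-filesystem CephFS controller, so they would typically be reached under the fs-scoped API paths. A hedged usage sketch; the base URL, fs_id, file paths, version header and parameter passing are assumptions, and authentication is omitted:

import requests

BASE = 'https://ceph-dashboard.example:8443/api'   # assumed dashboard URL
FS_ID = 1                                          # assumed filesystem id
HEADERS = {'Accept': 'application/vnd.ceph.api.v1.0+json'}  # assumed version header

# Write a small file, inspect the directory, then remove the file again.
requests.post(f'{BASE}/cephfs/{FS_ID}/write_to_file',
              json={'path': '/demo.txt', 'buf': 'hello'},
              headers=HEADERS, verify=False)

stats = requests.get(f'{BASE}/cephfs/{FS_ID}/statfs',
                     params={'path': '/'}, headers=HEADERS, verify=False).json()
print(stats)   # expected keys: 'bytes', 'files', 'subdirs' (see GET_STATFS_SCHEMA)

requests.delete(f'{BASE}/cephfs/{FS_ID}/unlink',
                params={'path': '/demo.txt'}, headers=HEADERS, verify=False)
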
@@ -560,7 +634,11 @@ class CephFSClients(object):
@ViewCache()
def get(self):
- return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
+ try:
+ ret = CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
+ except RuntimeError:
+ ret = []
+ return ret
@UIRouter('/cephfs', Scope.CEPHFS)
@@ -623,7 +701,7 @@ class CephFsUi(CephFS):
@APIDoc('CephFS Subvolume Management API', 'CephFSSubvolume')
class CephFSSubvolume(RESTController):
- def get(self, vol_name: str, group_name: str = ""):
+ def get(self, vol_name: str, group_name: str = "", info=True):
params = {'vol_name': vol_name}
if group_name:
params['group_name'] = group_name
@@ -634,15 +712,23 @@ class CephFSSubvolume(RESTController):
f'Failed to list subvolumes for volume {vol_name}: {err}'
)
subvolumes = json.loads(out)
- for subvolume in subvolumes:
- params['sub_name'] = subvolume['name']
- error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None,
- params)
- if error_code != 0:
- raise DashboardException(
- f'Failed to get info for subvolume {subvolume["name"]}: {err}'
- )
- subvolume['info'] = json.loads(out)
+
+ if info:
+ for subvolume in subvolumes:
+ params['sub_name'] = subvolume['name']
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None,
+ params)
+ # just ignore this error for now so the subvolumes page will load.
+ # the ideal solution is to implement a status page where clone status
+ # can be displayed
+ if error_code == -errno.EAGAIN:
+ pass
+ elif error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume {subvolume["name"]}: {err}'
+ )
+ if out:
+ subvolume['info'] = json.loads(out)
return subvolumes
@RESTController.Resource('GET')
@@ -699,12 +785,27 @@ class CephFSSubvolume(RESTController):
component='cephfs')
return f'Subvolume {subvol_name} removed successfully'
+ @RESTController.Resource('GET')
+ def exists(self, vol_name: str, group_name=''):
+ params = {'vol_name': vol_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote(
+ 'volumes', '_cmd_fs_subvolume_exist', None, params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to check if subvolume exists: {err}'
+ )
+ if out == 'no subvolume exists':
+ return False
+ return True
+
@APIRouter('/cephfs/subvolume/group', Scope.CEPHFS)
@APIDoc("Cephfs Subvolume Group Management API", "CephfsSubvolumeGroup")
class CephFSSubvolumeGroups(RESTController):
- def get(self, vol_name):
+ def get(self, vol_name, info=True):
if not vol_name:
raise DashboardException(
f'Error listing subvolume groups for {vol_name}')
@@ -714,15 +815,17 @@ class CephFSSubvolumeGroups(RESTController):
raise DashboardException(
f'Error listing subvolume groups for {vol_name}')
subvolume_groups = json.loads(out)
- for group in subvolume_groups:
- error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info',
- None, {'vol_name': vol_name,
- 'group_name': group['name']})
- if error_code != 0:
- raise DashboardException(
- f'Failed to get info for subvolume group {group["name"]}: {err}'
- )
- group['info'] = json.loads(out)
+
+ if info:
+ for group in subvolume_groups:
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info',
+ None, {'vol_name': vol_name,
+ 'group_name': group['name']})
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume group {group["name"]}: {err}'
+ )
+ group['info'] = json.loads(out)
return subvolume_groups
@RESTController.Resource('GET')
@@ -763,3 +866,272 @@ class CephFSSubvolumeGroups(RESTController):
f'Failed to delete subvolume group {group_name}: {err}'
)
return f'Subvolume group {group_name} removed successfully'
+
+
+@APIRouter('/cephfs/subvolume/snapshot', Scope.CEPHFS)
+@APIDoc("Cephfs Subvolume Snapshot Management API", "CephfsSubvolumeSnapshot")
+class CephFSSubvolumeSnapshots(RESTController):
+ def get(self, vol_name: str, subvol_name, group_name: str = '', info=True):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_ls', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to list subvolume snapshots for subvolume {subvol_name}: {err}'
+ )
+ snapshots = json.loads(out)
+
+ if info:
+ for snapshot in snapshots:
+ params['snap_name'] = snapshot['name']
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_info',
+ None, params)
+ # just ignore this error for now so the subvolumes page will load.
+ # the ideal solution is to implement a status page where clone status
+ # can be displayed
+ if error_code == -errno.EAGAIN:
+ pass
+ elif error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume snapshot {snapshot["name"]}: {err}'
+ )
+ if out:
+ snapshot['info'] = json.loads(out)
+ return snapshots
+
+ @RESTController.Resource('GET')
+ def info(self, vol_name: str, subvol_name: str, snap_name: str, group_name: str = ''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_info', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume snapshot {snap_name}: {err}'
+ )
+ return json.loads(out)
+
+ def create(self, vol_name: str, subvol_name: str, snap_name: str, group_name=''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_create', None,
+ params)
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create subvolume snapshot {snap_name}: {err}'
+ )
+ return f'Subvolume snapshot {snap_name} created successfully'
+
+ def delete(self, vol_name: str, subvol_name: str, snap_name: str, group_name='', force=True):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+ params['force'] = str_to_bool(force)
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_rm', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to delete subvolume snapshot {snap_name}: {err}'
+ )
+ return f'Subvolume snapshot {snap_name} removed successfully'
+
+
+@APIRouter('/cephfs/subvolume/snapshot/clone', Scope.CEPHFS)
+@APIDoc("Cephfs Snapshot Clone Management API", "CephfsSnapshotClone")
+class CephFsSnapshotClone(RESTController):
+ @EndpointDoc("Create a clone of a subvolume snapshot")
+ def create(self, vol_name: str, subvol_name: str, snap_name: str, clone_name: str,
+ group_name='', target_group_name=''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name,
+ 'target_sub_name': clone_name}
+ if group_name:
+ params['group_name'] = group_name
+
+ if target_group_name:
+ params['target_group_name'] = target_group_name
+
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_clone', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create clone {clone_name}: {err}'
+ )
+ return f'Clone {clone_name} created successfully'
+
+
+@APIRouter('/cephfs/snapshot/schedule', Scope.CEPHFS)
+@APIDoc("Cephfs Snapshot Scheduling API", "CephFSSnapshotSchedule")
+class CephFSSnapshotSchedule(RESTController):
+
+ def list(self, fs: str, path: str = '/', recursive: bool = True):
+ error_code, out, err = mgr.remote('snap_schedule', 'snap_schedule_list',
+ path, recursive, fs, None, None, 'plain')
+ if len(out) == 0:
+ return []
+
+ snapshot_schedule_list = out.split('\n')
+ output: List[Any] = []
+
+ for snap in snapshot_schedule_list:
+ current_path = snap.strip().split(' ')[0]
+ error_code, status_out, err = mgr.remote('snap_schedule', 'snap_schedule_get',
+ current_path, fs, None, None, 'json')
+ output = output + json.loads(status_out)
+
+ output_json = json.dumps(output)
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get list of snapshot schedules for path {path}: {err}'
+ )
+ return json.loads(output_json)
+
+ def create(self, fs: str, path: str, snap_schedule: str, start: str, retention_policy=None,
+ subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_add',
+ path,
+ snap_schedule,
+ start,
+ fs,
+ subvol,
+ group)
+
+ if retention_policy:
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code_retention, _, err_retention = mgr.remote('snap_schedule',
+ 'snap_schedule_retention_add',
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code_retention != 0:
+ raise DashboardException(
+ f'Failed to add retention policy for path {path}: {err_retention}'
+ )
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} created successfully'
+
+ def set(self, fs: str, path: str, retention_to_add=None, retention_to_remove=None,
+ subvol=None, group=None):
+ def editRetentionPolicies(method, retention_policy):
+ if not retention_policy:
+ return
+
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code_retention, _, err_retention = mgr.remote('snap_schedule',
+ method,
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code_retention != 0:
+ raise DashboardException(
+ f'Failed to add/remove retention policy for path {path}: {err_retention}'
+ )
+
+ editRetentionPolicies('snap_schedule_retention_rm', retention_to_remove)
+ editRetentionPolicies('snap_schedule_retention_add', retention_to_add)
+
+ return f'Retention policies for snapshot schedule on path {path} updated successfully'
+
+ @RESTController.Resource('DELETE')
+ def delete_snapshot(self, fs: str, path: str, schedule: str, start: str,
+ retention_policy=None, subvol=None, group=None):
+ if retention_policy:
+ # check if there are other snap schedules for this exact same path
+ error_code, out, err = mgr.remote('snap_schedule', 'snap_schedule_list',
+ path, False, fs, subvol, group, 'plain')
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get snapshot schedule list for path {path}: {err}'
+ )
+            # only remove the retention policies if there are no other snap schedules for this path
+ snapshot_schedule_list = out.split('\n')
+ if len(snapshot_schedule_list) <= 1:
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_retention_rm',
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to remove retention policy for path {path}: {err}'
+ )
+ # remove snap schedule
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_rm',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to delete snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} deleted successfully'
+
+ @RESTController.Resource('POST')
+ def deactivate(self, fs: str, path: str, schedule: str, start: str, subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_deactivate',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to deactivate snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} deactivated successfully'
+
+ @RESTController.Resource('POST')
+ def activate(self, fs: str, path: str, schedule: str, start: str, subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_activate',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to activate snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} activated successfully'
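
Editor's note: create(), set() and delete_snapshot() above all parse retention_policy as a pipe-separated list of count-spec pairs. A small sketch of that parsing on an assumed example value; the '5-d|4-w' string is illustrative, not taken from the patch:

retention_policy = '5-d|4-w'   # assumed example: a daily and a weekly retention entry

for retention in retention_policy.split('|'):
    retention_count = retention.split('-')[0]
    retention_spec_or_period = retention.split('-')[1]
    print(retention_count, retention_spec_or_period)
# 5 d
# 4 w
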
diff --git a/src/pybind/mgr/dashboard/controllers/osd.py b/src/pybind/mgr/dashboard/controllers/osd.py
index f6f8ce1f5..c9d141772 100644
--- a/src/pybind/mgr/dashboard/controllers/osd.py
+++ b/src/pybind/mgr/dashboard/controllers/osd.py
@@ -168,11 +168,18 @@ class Osd(RESTController):
@RESTController.Collection('GET', version=APIVersion.EXPERIMENTAL)
@ReadPermission
def settings(self):
- result = CephService.send_command('mon', 'osd dump')
- return {
- 'nearfull_ratio': result['nearfull_ratio'],
- 'full_ratio': result['full_ratio']
+ data = {
+ 'nearfull_ratio': -1,
+ 'full_ratio': -1
}
+ try:
+ result = CephService.send_command('mon', 'osd dump')
+ data['nearfull_ratio'] = result['nearfull_ratio']
+ data['full_ratio'] = result['full_ratio']
+ except TypeError:
+ logger.error(
+ 'Error setting nearfull_ratio and full_ratio:', exc_info=True)
+ return data
def _get_operational_status(self, osd_id: int, removing_osd_ids: Optional[List[int]]):
if removing_osd_ids is None:
diff --git a/src/pybind/mgr/dashboard/controllers/rgw.py b/src/pybind/mgr/dashboard/controllers/rgw.py
index 9ccf4b36b..240d22f21 100644
--- a/src/pybind/mgr/dashboard/controllers/rgw.py
+++ b/src/pybind/mgr/dashboard/controllers/rgw.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+# pylint: disable=C0302
import json
import logging
import re
@@ -134,6 +135,16 @@ class RgwDaemon(RESTController):
for service in server['services']:
metadata = service['metadata']
+ frontend_config = metadata['frontend_config#0']
+ port_match = re.search(r"port=(\d+)", frontend_config)
+ port = None
+ if port_match:
+ port = port_match.group(1)
+ else:
+ match_from_endpoint = re.search(r"endpoint=\S+:(\d+)", frontend_config)
+ if match_from_endpoint:
+ port = match_from_endpoint.group(1)
+
# extract per-daemon service data and health
daemon = {
'id': metadata['id'],
@@ -144,7 +155,7 @@ class RgwDaemon(RESTController):
'zonegroup_name': metadata['zonegroup_name'],
'zone_name': metadata['zone_name'],
'default': instance.daemon.name == metadata['id'],
- 'port': int(re.findall(r'port=(\d+)', metadata['frontend_config#0'])[0])
+ 'port': int(port) if port else None
}
daemons.append(daemon)
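
Editor's note: the new port lookup first tries an explicit port= setting and only then falls back to an endpoint=host:port form, instead of assuming port= is always present. A quick check of both regexes; the sample frontend_config values are assumptions for illustration:

import re


def extract_port(frontend_config: str):
    """Mirror the fallback logic from the hunk above; returns int or None."""
    port_match = re.search(r"port=(\d+)", frontend_config)
    if port_match:
        return int(port_match.group(1))
    match_from_endpoint = re.search(r"endpoint=\S+:(\d+)", frontend_config)
    if match_from_endpoint:
        return int(match_from_endpoint.group(1))
    return None


print(extract_port('beast port=8080'))              # 8080
print(extract_port('beast endpoint=0.0.0.0:8000'))  # 8000
print(extract_port('beast ssl_certificate=cert'))   # None
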
@@ -276,6 +287,26 @@ class RgwBucket(RgwRESTController):
retention_period_days,
retention_period_years)
+ def _get_policy(self, bucket: str):
+ rgw_client = RgwClient.admin_instance()
+ return rgw_client.get_bucket_policy(bucket)
+
+ def _set_policy(self, bucket_name: str, policy: str, daemon_name, owner):
+ rgw_client = RgwClient.instance(owner, daemon_name)
+ return rgw_client.set_bucket_policy(bucket_name, policy)
+
+ def _set_tags(self, bucket_name, tags, daemon_name, owner):
+ rgw_client = RgwClient.instance(owner, daemon_name)
+ return rgw_client.set_tags(bucket_name, tags)
+
+ def _get_acl(self, bucket_name, daemon_name, owner):
+ rgw_client = RgwClient.instance(owner, daemon_name)
+ return str(rgw_client.get_acl(bucket_name))
+
+ def _set_acl(self, bucket_name: str, acl: str, owner, daemon_name):
+ rgw_client = RgwClient.instance(owner, daemon_name)
+ return rgw_client.set_acl(bucket_name, acl)
+
@staticmethod
def strip_tenant_from_bucket_name(bucket_name):
# type (str) -> str
@@ -328,6 +359,8 @@ class RgwBucket(RgwRESTController):
result['encryption'] = encryption['Status']
result['versioning'] = versioning['Status']
result['mfa_delete'] = versioning['MfaDelete']
+ result['bucket_policy'] = self._get_policy(bucket_name)
+ result['acl'] = self._get_acl(bucket_name, daemon_name, result['owner'])
# Append the locking configuration.
locking = self._get_locking(result['owner'], daemon_name, bucket_name)
@@ -340,7 +373,8 @@ class RgwBucket(RgwRESTController):
lock_enabled='false', lock_mode=None,
lock_retention_period_days=None,
lock_retention_period_years=None, encryption_state='false',
- encryption_type=None, key_id=None, daemon_name=None):
+ encryption_type=None, key_id=None, tags=None,
+ bucket_policy=None, canned_acl=None, daemon_name=None):
lock_enabled = str_to_bool(lock_enabled)
encryption_state = str_to_bool(encryption_state)
try:
@@ -356,6 +390,15 @@ class RgwBucket(RgwRESTController):
if encryption_state:
self._set_encryption(bucket, encryption_type, key_id, daemon_name, uid)
+ if tags:
+ self._set_tags(bucket, tags, daemon_name, uid)
+
+ if bucket_policy:
+ self._set_policy(bucket, bucket_policy, daemon_name, uid)
+
+ if canned_acl:
+ self._set_acl(bucket, canned_acl, uid, daemon_name)
+
return result
except RequestException as e: # pragma: no cover - handling is too obvious
raise DashboardException(e, http_status_code=500, component='rgw')
@@ -365,7 +408,8 @@ class RgwBucket(RgwRESTController):
encryption_state='false', encryption_type=None, key_id=None,
mfa_delete=None, mfa_token_serial=None, mfa_token_pin=None,
lock_mode=None, lock_retention_period_days=None,
- lock_retention_period_years=None, daemon_name=None):
+ lock_retention_period_years=None, tags=None, bucket_policy=None,
+ canned_acl=None, daemon_name=None):
encryption_state = str_to_bool(encryption_state)
# When linking a non-tenant-user owned bucket to a tenanted user, we
# need to prefix bucket name with '/'. e.g. photos -> /photos
@@ -405,6 +449,12 @@ class RgwBucket(RgwRESTController):
self._set_encryption(bucket_name, encryption_type, key_id, daemon_name, uid)
if encryption_status['Status'] == 'Enabled' and (not encryption_state):
self._delete_encryption(bucket_name, daemon_name, uid)
+ if tags:
+ self._set_tags(bucket_name, tags, daemon_name, uid)
+ if bucket_policy:
+ self._set_policy(bucket_name, bucket_policy, daemon_name, uid)
+ if canned_acl:
+ self._set_acl(bucket_name, canned_acl, uid, daemon_name)
return self._append_bid(result)
def delete(self, bucket, purge_objects='true', daemon_name=None):
@@ -540,7 +590,7 @@ class RgwUser(RgwRESTController):
@allow_empty_body
def create(self, uid, display_name, email=None, max_buckets=None,
- suspended=None, generate_key=None, access_key=None,
+ system=None, suspended=None, generate_key=None, access_key=None,
secret_key=None, daemon_name=None):
params = {'uid': uid}
if display_name is not None:
@@ -549,6 +599,8 @@ class RgwUser(RgwRESTController):
params['email'] = email
if max_buckets is not None:
params['max-buckets'] = max_buckets
+ if system is not None:
+ params['system'] = system
if suspended is not None:
params['suspended'] = suspended
if generate_key is not None:
@@ -562,7 +614,7 @@ class RgwUser(RgwRESTController):
@allow_empty_body
def set(self, uid, display_name=None, email=None, max_buckets=None,
- suspended=None, daemon_name=None):
+ system=None, suspended=None, daemon_name=None):
params = {'uid': uid}
if display_name is not None:
params['display-name'] = display_name
@@ -570,6 +622,8 @@ class RgwUser(RgwRESTController):
params['email'] = email
if max_buckets is not None:
params['max-buckets'] = max_buckets
+ if system is not None:
+ params['system'] = system
if suspended is not None:
params['suspended'] = suspended
result = self.proxy(daemon_name, 'POST', 'user', params)
@@ -702,6 +756,36 @@ class RGWRoleEndpoints:
rgw_client.create_role(role_name, role_path, role_assume_policy_doc)
return f'Role {role_name} created successfully'
+ @staticmethod
+ def role_update(_, role_name: str, max_session_duration: str):
+ assert role_name
+ assert max_session_duration
+ # convert max_session_duration which is in hours to seconds
+ max_session_duration = int(float(max_session_duration) * 3600)
+ rgw_client = RgwClient.admin_instance()
+ rgw_client.update_role(role_name, str(max_session_duration))
+ return f'Role {role_name} updated successfully'
+
+ @staticmethod
+ def role_delete(_, role_name: str):
+ assert role_name
+ rgw_client = RgwClient.admin_instance()
+ rgw_client.delete_role(role_name)
+ return f'Role {role_name} deleted successfully'
+
+ @staticmethod
+ def model(role_name: str):
+ assert role_name
+ rgw_client = RgwClient.admin_instance()
+ role = rgw_client.get_role(role_name)
+ model = {'role_name': '', 'max_session_duration': ''}
+ model['role_name'] = role['RoleName']
+
+        # convert MaxSessionDuration, which is in seconds, to hours
+ if role['MaxSessionDuration']:
+ model['max_session_duration'] = role['MaxSessionDuration'] / 3600
+ return model
+
# pylint: disable=C0301
assume_role_policy_help = (
@@ -710,6 +794,10 @@ assume_role_policy_help = (
'target="_blank">click here.</a>'
)
+max_session_duration_help = (
+    'The maximum session duration (in hours) that you want to set for the specified role. This setting can have a value from 1 hour to 12 hours.'  # noqa: E501
+)
+
create_container = VerticalContainer('Create Role', 'create_role', fields=[
FormField('Role name', 'role_name', validators=[Validator.RGW_ROLE_NAME]),
FormField('Path', 'role_path', validators=[Validator.RGW_ROLE_PATH]),
@@ -719,37 +807,67 @@ create_container = VerticalContainer('Create Role', 'create_role', fields=[
field_type='textarea',
validators=[Validator.JSON]),
])
-create_role_form = Form(path='/rgw/roles/create',
+
+edit_container = VerticalContainer('Edit Role', 'edit_role', fields=[
+ FormField('Role name', 'role_name', readonly=True),
+ FormField('Max Session Duration', 'max_session_duration',
+ help=max_session_duration_help,
+ validators=[Validator.RGW_ROLE_SESSION_DURATION])
+])
+
+create_role_form = Form(path='/create',
root_container=create_container,
task_info=FormTaskInfo("IAM RGW Role '{role_name}' created successfully",
['role_name']),
method_type=MethodType.POST.value)
+edit_role_form = Form(path='/edit',
+ root_container=edit_container,
+ task_info=FormTaskInfo("IAM RGW Role '{role_name}' edited successfully",
+ ['role_name']),
+ method_type=MethodType.PUT.value,
+ model_callback=RGWRoleEndpoints.model)
+
@CRUDEndpoint(
router=APIRouter('/rgw/roles', Scope.RGW),
doc=APIDoc("List of RGW roles", "RGW"),
actions=[
TableAction(name='Create', permission='create', icon=Icon.ADD.value,
- routerLink='/rgw/roles/create')
+ routerLink='/rgw/roles/create'),
+ TableAction(name='Edit', permission='update', icon=Icon.EDIT.value,
+ click='edit', routerLink='/rgw/roles/edit'),
+ TableAction(name='Delete', permission='delete', icon=Icon.DESTROY.value,
+ click='delete', disable=True),
],
- forms=[create_role_form],
- permissions=[Scope.CONFIG_OPT],
+ forms=[create_role_form, edit_role_form],
+ column_key='RoleName',
+ resource='Role',
+ permissions=[Scope.RGW],
get_all=CRUDCollectionMethod(
func=RGWRoleEndpoints.role_list,
doc=EndpointDoc("List RGW roles")
),
create=CRUDCollectionMethod(
func=RGWRoleEndpoints.role_create,
- doc=EndpointDoc("Create Ceph User")
+ doc=EndpointDoc("Create RGW role")
+ ),
+ edit=CRUDCollectionMethod(
+ func=RGWRoleEndpoints.role_update,
+ doc=EndpointDoc("Edit RGW role")
+ ),
+ delete=CRUDCollectionMethod(
+ func=RGWRoleEndpoints.role_delete,
+ doc=EndpointDoc("Delete RGW role")
),
set_column={
"CreateDate": {'cellTemplate': 'date'},
"MaxSessionDuration": {'cellTemplate': 'duration'},
"RoleId": {'isHidden': True},
- "AssumeRolePolicyDocument": {'isHidden': True}
+ "AssumeRolePolicyDocument": {'isHidden': True},
+ "PermissionPolicies": {'isHidden': True}
},
- detail_columns=['RoleId', 'AssumeRolePolicyDocument'],
+ detail_columns=['RoleId', 'AssumeRolePolicyDocument', 'PermissionPolicies'],
meta=CRUDMeta()
)
class RgwUserRole(NamedTuple):
@@ -760,6 +878,7 @@ class RgwUserRole(NamedTuple):
CreateDate: str
MaxSessionDuration: int
AssumeRolePolicyDocument: str
+ PermissionPolicies: List
@APIRouter('/rgw/realm', Scope.RGW)
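
Editor's note: role_update() and model() convert the session duration in opposite directions, since the edit form works in hours while RGW stores seconds. A tiny round-trip sketch, assuming a 12-hour form input:

max_session_duration_hours = '12'   # assumed form input (hours, as a string)
seconds = int(float(max_session_duration_hours) * 3600)
print(seconds)                      # 43200, the value sent to update_role()

hours = seconds / 3600              # what model() hands back to the edit form
print(hours)                        # 12.0
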