Diffstat (limited to 'src/pybind/mgr/dashboard/controllers/cephfs.py')
-rw-r--r--  src/pybind/mgr/dashboard/controllers/cephfs.py  |  422
1 file changed, 397 insertions(+), 25 deletions(-)
diff --git a/src/pybind/mgr/dashboard/controllers/cephfs.py b/src/pybind/mgr/dashboard/controllers/cephfs.py
index 09b2bebfc..fcd87833f 100644
--- a/src/pybind/mgr/dashboard/controllers/cephfs.py
+++ b/src/pybind/mgr/dashboard/controllers/cephfs.py
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+import errno
import json
import logging
import os
from collections import defaultdict
-from typing import Any, Dict
+from typing import Any, Dict, List
import cephfs
import cherrypy
@@ -22,6 +24,11 @@ GET_QUOTAS_SCHEMA = {
'max_bytes': (int, ''),
'max_files': (int, '')
}
+GET_STATFS_SCHEMA = {
+ 'bytes': (int, ''),
+ 'files': (int, ''),
+ 'subdirs': (int, '')
+}
logger = logging.getLogger("controllers.rgw")
@@ -49,7 +56,7 @@ class CephFS(RESTController):
service_spec_str = service_spec_str[:-1]
if 'hosts' in service_spec['placement']:
for host in service_spec['placement']['hosts']:
- service_spec_str += f'{host},'
+ service_spec_str += f'{host} '
service_spec_str = service_spec_str[:-1]
error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_create', None,
@@ -94,6 +101,29 @@ class CephFS(RESTController):
component='cephfs')
return f'Volume {name} renamed successfully to {new_name}'
+ @UpdatePermission
+ @Endpoint('PUT')
+ @EndpointDoc("Set Ceph authentication capabilities for the specified user ID in the given path",
+ parameters={
+ 'fs_name': (str, 'File system name'),
+ 'client_id': (str, 'Cephx user ID'),
+ 'caps': (str, 'Path and given capabilities'),
+ 'root_squash': (str, 'Whether to apply root_squash to the given capabilities'),
+
+ })
+ def auth(self, fs_name: str, client_id: int, caps: List[str], root_squash: bool):
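+ # 'caps' is expected as alternating path/permission entries (e.g. ['/', 'rw']);
+ # inserting 'root_squash' after the first pair lets 'fs authorize' apply it
+ # to that grant.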
+ if root_squash:
+ caps.insert(2, 'root_squash')
+ error_code, _, err = mgr.mon_command({'prefix': 'fs authorize',
+ 'filesystem': fs_name,
+ 'entity': client_id,
+ 'caps': caps})
+ if error_code != 0:
+ raise DashboardException(
+ msg=f'Error setting authorization for {client_id} with {caps}: {err}',
+ component='cephfs')
+ return f'Updated {client_id} authorization successfully'
+
def get(self, fs_id):
fs_id = self.fs_id_to_int(fs_id)
return self.fs_status(fs_id)
@@ -330,13 +360,16 @@ class CephFS(RESTController):
standby_table = self.get_standby_table(fsmap['standbys'], mds_versions)
+ flags = mdsmap['flags_state']
+
return {
"cephfs": {
"id": fs_id,
"name": mdsmap['fs_name'],
"client_count": client_count,
"ranks": rank_table,
- "pools": pools_table
+ "pools": pools_table,
+ "flags": flags,
},
"standbys": standby_table,
"versions": mds_versions
@@ -359,7 +392,7 @@ class CephFS(RESTController):
"No cephfs with id {0}".format(fs_id))
# Decorate the metadata with some fields that will be
- # indepdendent of whether it's a kernel or userspace
+ # independent of whether it's a kernel or userspace
# client, so that the javascript doesn't have to grok that.
for client in clients:
if "ceph_version" in client['client_metadata']: # pragma: no cover - no complexity
@@ -518,6 +551,47 @@ class CephFS(RESTController):
cfs = self._cephfs_instance(fs_id)
return cfs.get_quotas(path)
+ @RESTController.Resource('POST', path='/write_to_file')
+ @allow_empty_body
+ def write_to_file(self, fs_id, path, buf) -> None:
+ """
+ Write some data to the specified path.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the file to write.
+ :param buf: The string to write to the file.
+ """
+ cfs = self._cephfs_instance(fs_id)
+ cfs.write_to_file(path, buf)
+
+ @RESTController.Resource('DELETE', path='/unlink')
+ def unlink(self, fs_id, path) -> None:
+ """
+ Removes a file, link, or symbolic link.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the file or link to unlink.
+ """
+ cfs = self._cephfs_instance(fs_id)
+ cfs.unlink(path)
+
+ @RESTController.Resource('GET', path='/statfs')
+ @EndpointDoc("Get Cephfs statfs of the specified path",
+ parameters={
+ 'fs_id': (str, 'File System Identifier'),
+ 'path': (str, 'File System Path'),
+ },
+ responses={200: GET_STATFS_SCHEMA})
+ def statfs(self, fs_id, path) -> dict:
+ """
+ Get the statfs of the specified path.
+ :param fs_id: The filesystem identifier.
+ :param path: The path of the directory/file.
+ :return: Returns a dictionary containing 'bytes',
+ 'files' and 'subdirs'.
+ :rtype: dict
+ """
+ cfs = self._cephfs_instance(fs_id)
+ return cfs.statfs(path)
+
@RESTController.Resource('POST', path='/snapshot')
@allow_empty_body
def snapshot(self, fs_id, path, name=None):
@@ -560,7 +634,11 @@ class CephFSClients(object):
@ViewCache()
def get(self):
- return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
+ try:
+ ret = CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
+ except RuntimeError:
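+ # 'session ls' can raise RuntimeError (e.g. if the MDS rank is unavailable);
+ # degrade to an empty client list instead of failing the whole view.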
+ ret = []
+ return ret
@UIRouter('/cephfs', Scope.CEPHFS)
@@ -623,7 +701,7 @@ class CephFsUi(CephFS):
@APIDoc('CephFS Subvolume Management API', 'CephFSSubvolume')
class CephFSSubvolume(RESTController):
- def get(self, vol_name: str, group_name: str = ""):
+ def get(self, vol_name: str, group_name: str = "", info=True):
params = {'vol_name': vol_name}
if group_name:
params['group_name'] = group_name
@@ -634,15 +712,23 @@ class CephFSSubvolume(RESTController):
f'Failed to list subvolumes for volume {vol_name}: {err}'
)
subvolumes = json.loads(out)
- for subvolume in subvolumes:
- params['sub_name'] = subvolume['name']
- error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None,
- params)
- if error_code != 0:
- raise DashboardException(
- f'Failed to get info for subvolume {subvolume["name"]}: {err}'
- )
- subvolume['info'] = json.loads(out)
+
+ if info:
+ for subvolume in subvolumes:
+ params['sub_name'] = subvolume['name']
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None,
+ params)
+ # just ignore this error for now so the subvolumes page will load.
+ # the ideal solution is to implement a status page where clone status
+ # can be displayed
+ if error_code == -errno.EAGAIN:
+ pass
+ elif error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume {subvolume["name"]}: {err}'
+ )
+ if out:
+ subvolume['info'] = json.loads(out)
return subvolumes
@RESTController.Resource('GET')
@@ -699,12 +785,27 @@ class CephFSSubvolume(RESTController):
component='cephfs')
return f'Subvolume {subvol_name} removed successfully'
+ @RESTController.Resource('GET')
+ def exists(self, vol_name: str, group_name=''):
+ params = {'vol_name': vol_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote(
+ 'volumes', '_cmd_fs_subvolume_exist', None, params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to check if subvolume exists: {err}'
+ )
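+ # the volumes module replies with the literal string 'no subvolume exists'
+ # when the volume (or group) contains no subvolumes.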
+ if out == 'no subvolume exists':
+ return False
+ return True
+
@APIRouter('/cephfs/subvolume/group', Scope.CEPHFS)
@APIDoc("Cephfs Subvolume Group Management API", "CephfsSubvolumeGroup")
class CephFSSubvolumeGroups(RESTController):
- def get(self, vol_name):
+ def get(self, vol_name, info=True):
if not vol_name:
raise DashboardException(
f'Error listing subvolume groups for {vol_name}')
@@ -714,15 +815,17 @@ class CephFSSubvolumeGroups(RESTController):
raise DashboardException(
f'Error listing subvolume groups for {vol_name}')
subvolume_groups = json.loads(out)
- for group in subvolume_groups:
- error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info',
- None, {'vol_name': vol_name,
- 'group_name': group['name']})
- if error_code != 0:
- raise DashboardException(
- f'Failed to get info for subvolume group {group["name"]}: {err}'
- )
- group['info'] = json.loads(out)
+
+ if info:
+ for group in subvolume_groups:
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info',
+ None, {'vol_name': vol_name,
+ 'group_name': group['name']})
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume group {group["name"]}: {err}'
+ )
+ group['info'] = json.loads(out)
return subvolume_groups
@RESTController.Resource('GET')
@@ -763,3 +866,272 @@ class CephFSSubvolumeGroups(RESTController):
f'Failed to delete subvolume group {group_name}: {err}'
)
return f'Subvolume group {group_name} removed successfully'
+
+
+@APIRouter('/cephfs/subvolume/snapshot', Scope.CEPHFS)
+@APIDoc("Cephfs Subvolume Snapshot Management API", "CephfsSubvolumeSnapshot")
+class CephFSSubvolumeSnapshots(RESTController):
+ def get(self, vol_name: str, subvol_name, group_name: str = '', info=True):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_ls', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to list subvolume snapshots for subvolume {subvol_name}: {err}'
+ )
+ snapshots = json.loads(out)
+
+ if info:
+ for snapshot in snapshots:
+ params['snap_name'] = snapshot['name']
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_info',
+ None, params)
+ # just ignore this error for now so the subvolumes page will load.
+ # the ideal solution is to implement a status page where clone status
+ # can be displayed
+ if error_code == -errno.EAGAIN:
+ pass
+ elif error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume snapshot {snapshot["name"]}: {err}'
+ )
+ if out:
+ snapshot['info'] = json.loads(out)
+ return snapshots
+
+ @RESTController.Resource('GET')
+ def info(self, vol_name: str, subvol_name: str, snap_name: str, group_name: str = ''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+ error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_info', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get info for subvolume snapshot {snap_name}: {err}'
+ )
+ return json.loads(out)
+
+ def create(self, vol_name: str, subvol_name: str, snap_name: str, group_name=''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_create', None,
+ params)
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create subvolume snapshot {snap_name}: {err}'
+ )
+ return f'Subvolume snapshot {snap_name} created successfully'
+
+ def delete(self, vol_name: str, subvol_name: str, snap_name: str, group_name='', force=True):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name}
+ if group_name:
+ params['group_name'] = group_name
+ params['force'] = str_to_bool(force)
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_rm', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to delete subvolume snapshot {snap_name}: {err}'
+ )
+ return f'Subvolume snapshot {snap_name} removed successfully'
+
+
+@APIRouter('/cephfs/subvolume/snapshot/clone', Scope.CEPHFS)
+@APIDoc("Cephfs Snapshot Clone Management API", "CephfsSnapshotClone")
+class CephFsSnapshotClone(RESTController):
+ @EndpointDoc("Create a clone of a subvolume snapshot")
+ def create(self, vol_name: str, subvol_name: str, snap_name: str, clone_name: str,
+ group_name='', target_group_name=''):
+ params = {'vol_name': vol_name, 'sub_name': subvol_name, 'snap_name': snap_name,
+ 'target_sub_name': clone_name}
+ if group_name:
+ params['group_name'] = group_name
+
+ if target_group_name:
+ params['target_group_name'] = target_group_name
+
+ error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_snapshot_clone', None,
+ params)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create clone {clone_name}: {err}'
+ )
+ return f'Clone {clone_name} created successfully'
+
+
+@APIRouter('/cephfs/snapshot/schedule', Scope.CEPHFS)
+@APIDoc("Cephfs Snapshot Scheduling API", "CephFSSnapshotSchedule")
+class CephFSSnapshotSchedule(RESTController):
+
+ def list(self, fs: str, path: str = '/', recursive: bool = True):
+ error_code, out, err = mgr.remote('snap_schedule', 'snap_schedule_list',
+ path, recursive, fs, None, None, 'plain')
+ if len(out) == 0:
+ return []
+
+ snapshot_schedule_list = out.split('\n')
+ output: List[Any] = []
+
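+ # 'plain' output lists one schedule per line with the path as the first
+ # whitespace-separated token; fetch the JSON status for each of those paths.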
+ for snap in snapshot_schedule_list:
+ current_path = snap.strip().split(' ')[0]
+ error_code, status_out, err = mgr.remote('snap_schedule', 'snap_schedule_get',
+ current_path, fs, None, None, 'json')
+ output = output + json.loads(status_out)
+
+ output_json = json.dumps(output)
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get list of snapshot schedules for path {path}: {err}'
+ )
+ return json.loads(output_json)
+
+ def create(self, fs: str, path: str, snap_schedule: str, start: str, retention_policy=None,
+ subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_add',
+ path,
+ snap_schedule,
+ start,
+ fs,
+ subvol,
+ group)
+
+ if retention_policy:
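+ # retention_policy is a '|'-separated list of '<count>-<spec>' entries,
+ # e.g. '5-d|4-w' to keep 5 daily and 4 weekly snapshots.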
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code_retention, _, err_retention = mgr.remote('snap_schedule',
+ 'snap_schedule_retention_add',
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code_retention != 0:
+ raise DashboardException(
+ f'Failed to add retention policy for path {path}: {err_retention}'
+ )
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to create snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} created successfully'
+
+ def set(self, fs: str, path: str, retention_to_add=None, retention_to_remove=None,
+ subvol=None, group=None):
+ def editRetentionPolicies(method, retention_policy):
+ if not retention_policy:
+ return
+
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code_retention, _, err_retention = mgr.remote('snap_schedule',
+ method,
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code_retention != 0:
+ raise DashboardException(
+ f'Failed to add/remove retention policy for path {path}: {err_retention}'
+ )
+
+ editRetentionPolicies('snap_schedule_retention_rm', retention_to_remove)
+ editRetentionPolicies('snap_schedule_retention_add', retention_to_add)
+
+ return f'Retention policies for snapshot schedule on path {path} updated successfully'
+
+ @RESTController.Resource('DELETE')
+ def delete_snapshot(self, fs: str, path: str, schedule: str, start: str,
+ retention_policy=None, subvol=None, group=None):
+ if retention_policy:
+ # check if there are other snap schedules for this exact same path
+ error_code, out, err = mgr.remote('snap_schedule', 'snap_schedule_list',
+ path, False, fs, subvol, group, 'plain')
+
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to get snapshot schedule list for path {path}: {err}'
+ )
+ # only remove the retention policies if there are no other snap schedules for this path
+ snapshot_schedule_list = out.split('\n')
+ if len(snapshot_schedule_list) <= 1:
+ retention_policies = retention_policy.split('|')
+ for retention in retention_policies:
+ retention_count = retention.split('-')[0]
+ retention_spec_or_period = retention.split('-')[1]
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_retention_rm',
+ path,
+ retention_spec_or_period,
+ retention_count,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to remove retention policy for path {path}: {err}'
+ )
+ # remove snap schedule
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_rm',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to delete snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} deleted successfully'
+
+ @RESTController.Resource('POST')
+ def deactivate(self, fs: str, path: str, schedule: str, start: str, subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_deactivate',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to deactivate snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} deactivated successfully'
+
+ @RESTController.Resource('POST')
+ def activate(self, fs: str, path: str, schedule: str, start: str, subvol=None, group=None):
+ error_code, _, err = mgr.remote('snap_schedule',
+ 'snap_schedule_activate',
+ path,
+ schedule,
+ start,
+ fs,
+ subvol,
+ group)
+ if error_code != 0:
+ raise DashboardException(
+ f'Failed to activate snapshot schedule for path {path}: {err}'
+ )
+
+ return f'Snapshot schedule for path {path} activated successfully'