Diffstat (limited to 'src/pybind/mgr/volumes/fs/volume.py')
-rw-r--r--  src/pybind/mgr/volumes/fs/volume.py  24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index 5c6642444..2e96f8306 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -1,7 +1,6 @@
 import json
 import errno
 import logging
-import os
 import mgr_util
 
 from typing import TYPE_CHECKING
@@ -14,13 +13,14 @@ from .fs_util import listdir, has_subdir
 from .operations.group import open_group, create_group, remove_group, \
     open_group_unique, set_group_attrs
 from .operations.volume import create_volume, delete_volume, rename_volume, \
-    list_volumes, open_volume, get_pool_names, get_pool_ids, get_pending_subvol_deletions_count
+    list_volumes, open_volume, get_pool_names, get_pool_ids, \
+    get_pending_subvol_deletions_count, get_all_pending_clones_count
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
 
-from .operations.trash import Trash
 from .vol_spec import VolSpec
-from .exception import VolumeException, ClusterError, ClusterTimeout, EvictionError
+from .exception import VolumeException, ClusterError, ClusterTimeout, \
+    EvictionError
 from .async_cloner import Cloner
 from .purge_queue import ThreadPoolPurgeQueueMixin
 from .operations.template import SubvolumeOpType
@@ -55,7 +55,8 @@ class VolumeClient(CephfsClient["Module"]):
         super().__init__(mgr)
         # volume specification
         self.volspec = VolSpec(mgr.rados.conf_get('client_snapdir'))
-        self.cloner = Cloner(self, self.mgr.max_concurrent_clones, self.mgr.snapshot_clone_delay)
+        self.cloner = Cloner(self, self.mgr.max_concurrent_clones, self.mgr.snapshot_clone_delay,
+                             self.mgr.snapshot_clone_no_wait)
         self.purge_queue = ThreadPoolPurgeQueueMixin(self, 4)
         # on startup, queue purge job for available volumes to kickstart
         # purge for leftover subvolume entries in trash. note that, if the
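For orientation, here is a minimal sketch of how the Cloner constructor might carry the fourth argument; the parameter name clone_no_wait is an assumption, and the real class lives in src/pybind/mgr/volumes/fs/async_cloner.py:

    # Hypothetical sketch, not the actual async_cloner.Cloner implementation.
    class Cloner:
        def __init__(self, volume_client, tp_size, snapshot_clone_delay, clone_no_wait):
            self.vc = volume_client
            self.nr_concurrent_clones = tp_size
            self.snapshot_clone_delay = snapshot_clone_delay
            # when enabled, clone requests beyond thread-pool capacity are
            # rejected with EAGAIN instead of being queued indefinitely
            self.snapshot_clone_no_wait = clone_no_wait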
@@ -338,7 +339,7 @@ class VolumeClient(CephfsClient["Module"]):
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
                     with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.EVICT) as subvolume:
-                        key = subvolume.evict(volname, authid)
+                        subvolume.evict(volname, authid)
                         ret = 0, "", ""
         except (VolumeException, ClusterTimeout, ClusterError, EvictionError) as e:
             if isinstance(e, VolumeException):
@@ -424,6 +425,7 @@ class VolumeClient(CephfsClient["Module"]):
 
                         subvol_info_dict = subvolume.info()
                         subvol_info_dict["mon_addrs"] = mon_addr_lst
+                        subvol_info_dict["flavor"] = subvolume.VERSION
                         ret = 0, json.dumps(subvol_info_dict, indent=4, sort_keys=True), ""
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)
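The new "flavor" key surfaces subvolume.VERSION (1 for legacy v1 subvolume layouts, 2 for v2) in the subvolume info output. A small consumer sketch, assuming a reachable cluster; the volume name "a" and subvolume name "sub0" are placeholders:

    import json
    import subprocess
    
    # Placeholder volume/subvolume names; adjust for your cluster.
    out = subprocess.check_output(
        ["ceph", "fs", "subvolume", "info", "a", "sub0", "-f", "json"])
    info = json.loads(out)
    # With this change the mgr reports the subvolume version alongside mon_addrs.
    print("flavor:", info.get("flavor"))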
@@ -695,7 +697,7 @@ class VolumeClient(CephfsClient["Module"]):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT):
                         log.warning("snapshot protect call is deprecated and will be removed in a future release")
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)
@@ -710,7 +712,7 @@ class VolumeClient(CephfsClient["Module"]):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT):
                         log.warning("snapshot unprotect call is deprecated and will be removed in a future release")
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)
@@ -765,6 +767,10 @@ class VolumeClient(CephfsClient["Module"]):
         s_groupname = kwargs['group_name']
 
         try:
+            if self.mgr.snapshot_clone_no_wait and \
+                    get_all_pending_clones_count(self, self.mgr, self.volspec) >= self.mgr.max_concurrent_clones:
+                raise(VolumeException(-errno.EAGAIN, "all cloner threads are busy, please try again later"))
+
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, s_groupname) as s_group:
                     with open_subvol(self.mgr, fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
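The guard above fails fast instead of queueing when every cloner thread already has work. A standalone sketch of the same logic, with a stub VolumeException standing in for the real class from .exception:

    import errno
    
    class VolumeException(Exception):
        def __init__(self, err, msg):
            super().__init__(msg)
            self.errno = err
    
    def check_clone_capacity(pending_clones, max_concurrent_clones, no_wait):
        # Mirror of the new guard: with snapshot_clone_no_wait enabled, refuse
        # new clones once every cloner thread already has work queued.
        if no_wait and pending_clones >= max_concurrent_clones:
            raise VolumeException(-errno.EAGAIN,
                                  "all cloner threads are busy, please try again later")
    
    # e.g. with 4 cloner threads and 4 clones already pending:
    try:
        check_clone_capacity(pending_clones=4, max_concurrent_clones=4, no_wait=True)
    except VolumeException as e:
        print(e)  # caller sees EAGAIN and can retry later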
@@ -962,7 +968,7 @@ class VolumeClient(CephfsClient["Module"]):
 
         try:
             with open_volume(self, volname) as fs_handle:
-                with open_group(fs_handle, self.volspec, groupname) as group:
+                with open_group(fs_handle, self.volspec, groupname):
                     # as subvolumes are marked with the vxattr ceph.dir.subvolume deny snapshots
                     # at the subvolume group (see: https://tracker.ceph.com/issues/46074)
                     # group.create_snapshot(snapname)