Diffstat (limited to 'qa/tasks/cephfs/cephfs_test_case.py')
-rw-r--r--  qa/tasks/cephfs/cephfs_test_case.py | 46
1 file changed, 16 insertions(+), 30 deletions(-)
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d2688929c..f26b598aa 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -3,8 +3,6 @@ import logging
import os
import re
-from shlex import split as shlex_split
-
from tasks.ceph_test_case import CephTestCase
from teuthology import contextutil
@@ -96,22 +94,22 @@ class CephFSTestCase(CephTestCase):
# In case anything is in the OSD blocklist, clear it out. This is to avoid
# the OSD map changing in the background (due to blocklist expiry) while tests run.
try:
- self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
+ self.run_ceph_cmd("osd blocklist clear")
except CommandFailedError:
# Fallback for older Ceph clusters
try:
- blocklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blocklist']
+ blocklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blocklist']
log.info(f"Removing {len(blocklist)} blocklist entries")
for addr, blocklisted_at in blocklist.items():
- self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr)
+ self.run_ceph_cmd("osd", "blocklist", "rm", addr)
except KeyError:
# Fallback for even older Ceph clusters, which use 'blacklist' instead.
- blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blacklist']
+ blacklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blacklist']
log.info(f"Removing {len(blacklist)} blacklist entries")
for addr, blocklisted_at in blacklist.items():
- self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+ self.run_ceph_cmd("osd", "blacklist", "rm", addr)
def setUp(self):
super(CephFSTestCase, self).setUp()
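Note that the new helpers themselves are not defined in this file. Judging by the call sites above ("osd blocklist clear" passed as one string, ("osd", "blocklist", "rm", addr) passed as varargs) and by the removed run_cluster_cmd() helper further down, run_ceph_cmd() presumably shlex-splits a lone string argument. A minimal sketch under those assumptions:

    # Hypothetical sketch, not part of this diff: a thin wrapper over
    # mon_manager.run_cluster_cmd(), modelled on the removed
    # run_cluster_cmd() helper below. Assumes a mon_manager handle is
    # reachable from self.
    from shlex import split as shlex_split

    def run_ceph_cmd(self, *args, **kwargs):
        if len(args) == 1 and isinstance(args[0], str):
            # A single string such as "osd blocklist clear" is split into
            # argv-style tokens; varargs are passed through unchanged.
            args = shlex_split(args[0])
        return self.mon_manager.run_cluster_cmd(args=args, **kwargs)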
@@ -160,7 +158,7 @@ class CephFSTestCase(CephTestCase):
for entry in self.auth_list():
ent_type, ent_id = entry['entity'].split(".")
if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
- self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+ self.run_ceph_cmd("auth", "del", entry['entity'])
if self.REQUIRE_FILESYSTEM:
self.fs = self.mds_cluster.newfs(create=True)
@@ -171,11 +169,11 @@ class CephFSTestCase(CephTestCase):
'osd', f'allow rw tag cephfs data={self.fs.name}',
'mds', 'allow']
- if self.run_cluster_cmd_result(cmd) == 0:
+ if self.get_ceph_cmd_result(*cmd) == 0:
break
cmd[1] = 'add'
- if self.run_cluster_cmd_result(cmd) != 0:
+ if self.get_ceph_cmd_result(*cmd) != 0:
raise RuntimeError(f'Failed to create new client {cmd[2]}')
# wait for ranks to become active
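get_ceph_cmd_result() is compared against 0 above, so it presumably returns the command's exit status instead of raising on failure, the same contract as the replaced run_cluster_cmd_result(). A sketch under that assumption:

    # Hypothetical sketch, not part of this diff: suppress the
    # CommandFailedError and hand back the exit code for branching.
    def get_ceph_cmd_result(self, *args, **kwargs):
        kwargs['check_status'] = False  # assumption: don't raise on non-zero exit
        return self.run_ceph_cmd(*args, **kwargs).exitstatus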
@@ -188,9 +186,8 @@ class CephFSTestCase(CephTestCase):
if self.REQUIRE_BACKUP_FILESYSTEM:
if not self.REQUIRE_FILESYSTEM:
self.skipTest("backup filesystem requires a primary filesystem as well")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
- 'enable_multiple', 'true',
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'flag', 'set', 'enable_multiple', 'true',
+ '--yes-i-really-mean-it')
self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
self.backup_fs.wait_for_daemons()
@@ -226,9 +223,8 @@ class CephFSTestCase(CephTestCase):
"""
Convenience wrapper on "ceph auth ls"
"""
- return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
- "auth", "ls", "--format=json-pretty"
- ))['auth_dump']
+ return json.loads(self.get_ceph_cmd_stdout("auth", "ls",
+ "--format=json-pretty"))['auth_dump']
def assert_session_count(self, expected, ls_data=None, mds_id=None):
if ls_data is None:
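auth_list() feeds the helper's return value straight into json.loads(), so get_ceph_cmd_stdout() must return the captured standard output as a string. A sketch under that assumption:

    # Hypothetical sketch, not part of this diff: capture stdout so the
    # caller can parse it (e.g. with json.loads()).
    from io import StringIO

    def get_ceph_cmd_stdout(self, *args, **kwargs):
        kwargs['stdout'] = StringIO()
        self.run_ceph_cmd(*args, **kwargs)
        return kwargs['stdout'].getvalue()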
@@ -411,16 +407,6 @@ class CephFSTestCase(CephTestCase):
except contextutil.MaxWhileTries as e:
raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
- def run_cluster_cmd(self, cmd):
- if isinstance(cmd, str):
- cmd = shlex_split(cmd)
- return self.fs.mon_manager.raw_cluster_cmd(*cmd)
-
- def run_cluster_cmd_result(self, cmd):
- if isinstance(cmd, str):
- cmd = shlex_split(cmd)
- return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
-
def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
if not (moncap or osdcap or mdscap):
if self.fs:
@@ -438,5 +424,5 @@ class CephFSTestCase(CephTestCase):
if mdscap:
cmd += ['mds', mdscap]
- self.run_cluster_cmd(cmd)
- return self.run_cluster_cmd(f'auth get {self.client_name}')
+ self.run_ceph_cmd(*cmd)
+ return self.run_ceph_cmd(f'auth get {self.client_name}')
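Taken together, the three call styles this diff leaves behind would all be served by the sketches above:

    # Hypothetical usage, matching the call sites in this diff:
    self.run_ceph_cmd("osd blocklist clear")           # single string, shlex-split
    self.run_ceph_cmd("osd", "blocklist", "rm", addr)  # varargs
    self.run_ceph_cmd(*cmd)                            # unpacked list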