Diffstat (limited to 'qa/tasks/cephfs/test_admin.py')
-rw-r--r--  qa/tasks/cephfs/test_admin.py | 377
1 file changed, 263 insertions(+), 114 deletions(-)
diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 9890381c6..4f3100bbe 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -7,6 +7,7 @@ from io import StringIO
from os.path import join as os_path_join
from teuthology.exceptions import CommandFailedError
+from teuthology.contextutil import safe_while
from tasks.cephfs.cephfs_test_case import CephFSTestCase, classhook
from tasks.cephfs.filesystem import FileLayout, FSMissing
@@ -15,6 +16,61 @@ from tasks.cephfs.caps_helper import CapTester
log = logging.getLogger(__name__)
+class TestLabeledPerfCounters(CephFSTestCase):
+ CLIENTS_REQUIRED = 2
+ MDSS_REQUIRED = 1
+
+ def test_per_client_labeled_perf_counters(self):
+ """
+ That the per-client labelled perf counters depict the clients
+        performing IO.
+ """
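+        # helper: pick this client's labelled counters out of the rank's "counter dump" output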
+ def get_counters_for(filesystem, client_id):
+ dump = self.fs.rank_tell(["counter", "dump"])
+ per_client_metrics_key = f'mds_client_metrics-{filesystem}'
+            counters = [c["counters"] for c in dump[per_client_metrics_key]
+                        if c["labels"]["client"] == client_id]
+ return counters[0]
+
+ # sleep a bit so that we get updated clients...
+ time.sleep(10)
+
+        # look for the clients...
+ dump = self.fs.rank_tell(["counter", "dump"])
+
+ fs_suffix = dump["mds_client_metrics"][0]["labels"]["fs_name"]
+ self.assertGreaterEqual(dump["mds_client_metrics"][0]["counters"]["num_clients"], 2)
+
+ per_client_metrics_key = f'mds_client_metrics-{fs_suffix}'
+ mount_a_id = f'client.{self.mount_a.get_global_id()}'
+ mount_b_id = f'client.{self.mount_b.get_global_id()}'
+
+ clients = [c["labels"]["client"] for c in dump[per_client_metrics_key]]
+ self.assertIn(mount_a_id, clients)
+ self.assertIn(mount_b_id, clients)
+
+ # write workload
+ self.mount_a.create_n_files("test_dir/test_file", 1000, sync=True)
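+        # poll the labelled counters until the MDS reports this client's writes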
+        with safe_while(sleep=1, tries=30, action=f'wait for counters - {mount_a_id}') as proceed:
+            while proceed():
+                counters_dump_a = get_counters_for(fs_suffix, mount_a_id)
+                if counters_dump_a["total_write_ops"] > 0 and counters_dump_a["total_write_size"] > 0:
+                    break
+
+ # read from the other client
+ for i in range(100):
+ self.mount_b.open_background(basename=f'test_dir/test_file_{i}', write=False)
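+        # likewise, poll until the second client's read ops/size counters tick up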
+        with safe_while(sleep=1, tries=30, action=f'wait for counters - {mount_b_id}') as proceed:
+            while proceed():
+                counters_dump_b = get_counters_for(fs_suffix, mount_b_id)
+                if counters_dump_b["total_read_ops"] > 0 and counters_dump_b["total_read_size"] > 0:
+                    break
+
+ self.fs.teardown()
+
class TestAdminCommands(CephFSTestCase):
"""
    Tests for administration commands.
@@ -24,18 +77,18 @@ class TestAdminCommands(CephFSTestCase):
MDSS_REQUIRED = 1
def check_pool_application_metadata_key_value(self, pool, app, key, value):
- output = self.fs.mon_manager.raw_cluster_cmd(
+ output = self.get_ceph_cmd_stdout(
'osd', 'pool', 'application', 'get', pool, app, key)
self.assertEqual(str(output.strip()), value)
def setup_ec_pools(self, n, metadata=True, overwrites=True):
if metadata:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-meta", "8")
cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
- self.fs.mon_manager.raw_cluster_cmd(*cmd)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+ self.run_ceph_cmd(cmd)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
if overwrites:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+ self.run_ceph_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
@classhook('_add_valid_tell')
class TestValidTell(TestAdminCommands):
@@ -76,13 +129,13 @@ class TestFsStatus(TestAdminCommands):
That `ceph fs status` command functions.
"""
- s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
+ s = self.get_ceph_cmd_stdout("fs", "status")
self.assertTrue("active" in s)
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json-pretty"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
@@ -104,7 +157,7 @@ class TestAddDataPool(TestAdminCommands):
That the application metadata set on a newly added data pool is as expected.
"""
pool_name = "foo"
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
str(self.fs.pg_num_min))
# Check whether https://tracker.ceph.com/issues/43061 is fixed
@@ -148,22 +201,22 @@ class TestAddDataPool(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'first_data_pool' with 'second_fs'
# Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -178,23 +231,23 @@ class TestAddDataPool(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'second_metadata_pool' with 'first_fs' as a data pool
# Expecting EINVAL exit status because 'second_metadata_pool'
# is already in use with 'second_fs' as a metadata pool
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -211,23 +264,21 @@ class TestFsNew(TestAdminCommands):
metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
badname = n+'badname@#'
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+metapoolname)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+datapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+metapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+datapoolname)
# test that fsname not with "goodchars" fails
args = ['fs', 'new', badname, metapoolname, datapoolname]
- proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
- check_status=False)
+ proc = self.run_ceph_cmd(args=args, stderr=StringIO(),
+ check_status=False)
self.assertIn('invalid chars', proc.stderr.getvalue().lower())
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
- metapoolname,
- '--yes-i-really-really-mean-it-not-faking')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
- datapoolname,
- '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', metapoolname,
+ metapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', datapoolname,
+ datapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
def test_new_default_ec(self):
"""
@@ -239,7 +290,7 @@ class TestFsNew(TestAdminCommands):
n = "test_new_default_ec"
self.setup_ec_pools(n)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -257,7 +308,7 @@ class TestFsNew(TestAdminCommands):
self.mds_cluster.delete_all_filesystems()
n = "test_new_default_ec_force"
self.setup_ec_pools(n)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
def test_new_default_ec_no_overwrite(self):
"""
@@ -269,7 +320,7 @@ class TestFsNew(TestAdminCommands):
n = "test_new_default_ec_no_overwrite"
self.setup_ec_pools(n, overwrites=False)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -279,7 +330,7 @@ class TestFsNew(TestAdminCommands):
raise RuntimeError("expected failure")
# and even with --force !
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -297,7 +348,7 @@ class TestFsNew(TestAdminCommands):
fs_name = "test_fs_new_pool_application"
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
for p in pool_names:
mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
@@ -315,8 +366,8 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
self.fs.status().get_fsmap(fscid)
for i in range(2):
self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
@@ -330,9 +381,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
self.fs.status().get_fsmap(fscid)
def test_fs_new_with_specific_id_fails_without_force_flag(self):
@@ -344,9 +395,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'osd pool create {p}')
try:
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a file system with specifc ID without --force flag")
@@ -363,9 +414,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'osd pool create {p}')
try:
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a file system with specifc ID that is already in use")
@@ -381,13 +432,13 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'first_metadata_pool'
@@ -395,7 +446,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -410,13 +461,13 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'second_metadata_pool'
@@ -424,7 +475,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -439,9 +490,9 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
@@ -451,7 +502,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
        # are already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -466,17 +517,17 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
@@ -486,7 +537,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
        # are already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -501,9 +552,9 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
@@ -513,7 +564,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
        # are already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -528,17 +579,17 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
@@ -548,7 +599,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
        # are already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -561,20 +612,20 @@ class TestFsNew(TestAdminCommands):
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_data_pool = "new_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_data_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_pool' (already used by rbd app)
# data pool -> 'new_data_pool'
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -587,20 +638,20 @@ class TestFsNew(TestAdminCommands):
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_metadata_pool = "new_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_metadata_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_metadata_pool'
# data pool -> 'new_pool' (already used by rbd app)
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -628,7 +679,7 @@ class TestRenameCommand(TestAdminCommands):
new_fs_name = 'new_cephfs'
client_id = 'test_new_cephfs'
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
# authorize a cephx ID access to the renamed file system.
# use the ID to write to the file system.
@@ -649,7 +700,7 @@ class TestRenameCommand(TestAdminCommands):
# cleanup
self.mount_a.umount_wait()
- self.run_cluster_cmd(f'auth rm client.{client_id}')
+ self.run_ceph_cmd(f'auth rm client.{client_id}')
def test_fs_rename_idempotency(self):
"""
@@ -661,8 +712,8 @@ class TestRenameCommand(TestAdminCommands):
orig_fs_name = self.fs.name
new_fs_name = 'new_cephfs'
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
# original file system name does not appear in `fs ls` command
self.assertFalse(self.fs.exists())
@@ -681,10 +732,10 @@ class TestRenameCommand(TestAdminCommands):
new_fs_name = 'new_cephfs'
data_pool = self.fs.get_data_pool_name()
metadata_pool = self.fs.get_metadata_pool_name()
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a new file system with old "
@@ -694,7 +745,7 @@ class TestRenameCommand(TestAdminCommands):
"existing pools to fail.")
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a new file system with old "
@@ -704,7 +755,7 @@ class TestRenameCommand(TestAdminCommands):
"existing pools, and --force flag to fail.")
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
"--allow-dangerous-metadata-overlay")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
@@ -719,7 +770,7 @@ class TestRenameCommand(TestAdminCommands):
That renaming a file system without '--yes-i-really-mean-it' flag fails.
"""
try:
- self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
+ self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EPERM,
"invalid error code on renaming a file system without the "
@@ -733,7 +784,7 @@ class TestRenameCommand(TestAdminCommands):
That renaming a non-existent file system fails.
"""
try:
- self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
+ self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
else:
@@ -746,7 +797,7 @@ class TestRenameCommand(TestAdminCommands):
self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
try:
- self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
+ self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on renaming to a fs name that is already in use")
@@ -760,14 +811,14 @@ class TestRenameCommand(TestAdminCommands):
orig_fs_name = self.fs.name
new_fs_name = 'new_cephfs'
- self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
+ self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
try:
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
else:
self.fail("expected renaming of a mirrored file system to fail")
- self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
+ self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
class TestDump(CephFSTestCase):
@@ -851,13 +902,13 @@ class TestRequiredClientFeatures(CephFSTestCase):
"""
def is_required(index):
- out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
+ out = self.get_ceph_cmd_stdout('fs', 'get', self.fs.name, '--format=json-pretty')
features = json.loads(out)['mdsmap']['required_client_features']
if "feature_{0}".format(index) in features:
return True;
return False;
- features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
+ features = json.loads(self.get_ceph_cmd_stdout('fs', 'feature', 'ls', '--format=json-pretty'))
self.assertGreater(len(features), 0);
for f in features:
@@ -1063,7 +1114,7 @@ class TestConfigCommands(CephFSTestCase):
names = self.fs.get_rank_names()
for n in names:
- s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
+ s = self.get_ceph_cmd_stdout("config", "show", "mds."+n)
self.assertTrue("NAME" in s)
self.assertTrue("mon_host" in s)
@@ -1113,17 +1164,17 @@ class TestMirroringCommands(CephFSTestCase):
MDSS_REQUIRED = 1
def _enable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", fs_name)
def _disable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", fs_name)
def _add_peer(self, fs_name, peer_spec, remote_fs_name):
peer_uuid = str(uuid.uuid4())
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
def _remove_peer(self, fs_name, peer_uuid):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.run_ceph_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
def _verify_mirroring(self, fs_name, flag_str):
status = self.fs.status()
@@ -1250,6 +1301,10 @@ class TestFsAuthorize(CephFSTestCase):
self.captester.run_mds_cap_tests(PERM)
def test_single_path_rootsquash(self):
+ if not isinstance(self.mount_a, FuseMount):
+ self.skipTest("only FUSE client has CEPHFS_FEATURE_MDS_AUTH_CAPS "
+ "needed to enforce root_squash MDS caps")
+
PERM = 'rw'
FS_AUTH_CAPS = (('/', PERM, 'root_squash'),)
self.captester = CapTester()
@@ -1259,7 +1314,37 @@ class TestFsAuthorize(CephFSTestCase):
# Since root_squash is set in client caps, client can read but not
        # write even though access level is set to "rw".
self.captester.conduct_pos_test_for_read_caps()
+ self.captester.conduct_pos_test_for_open_caps()
self.captester.conduct_neg_test_for_write_caps(sudo_write=True)
+ self.captester.conduct_neg_test_for_chown_caps()
+ self.captester.conduct_neg_test_for_truncate_caps()
+
+ def test_single_path_rootsquash_issue_56067(self):
+ """
+        That a FS client using root_squash MDS caps allows a non-root user to write
+        data to a file, and that after a client remount the non-root user can read
+        back the data it previously wrote. https://tracker.ceph.com/issues/56067
+ """
+ if not isinstance(self.mount_a, FuseMount):
+ self.skipTest("only FUSE client has CEPHFS_FEATURE_MDS_AUTH_CAPS "
+ "needed to enforce root_squash MDS caps")
+
+ keyring = self.fs.authorize(self.client_id, ('/', 'rw', 'root_squash'))
+ keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
+ self.mount_a.remount(client_id=self.client_id,
+ client_keyring_path=keyring_path,
+ cephfs_mntpt='/')
+ filedata, filename = 'some data on fs 1', 'file_on_fs1'
+ filepath = os_path_join(self.mount_a.hostfs_mntpt, filename)
+ self.mount_a.write_file(filepath, filedata)
+
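+        # remount so the file is read back from the file system, not from the client cache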
+ self.mount_a.remount(client_id=self.client_id,
+ client_keyring_path=keyring_path,
+ cephfs_mntpt='/')
+ if filepath.find(self.mount_a.hostfs_mntpt) != -1:
+ contents = self.mount_a.read_file(filepath)
+ self.assertEqual(filedata, contents)
def test_single_path_authorize_on_nonalphanumeric_fsname(self):
"""
@@ -1271,10 +1355,10 @@ class TestFsAuthorize(CephFSTestCase):
fs_name = "cephfs-_."
self.fs = self.mds_cluster.newfs(name=fs_name)
self.fs.wait_for_daemons()
- self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
- f'mon "allow r" '
- f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
- f'mds allow')
+ self.run_ceph_cmd(f'auth caps client.{self.mount_a.client_id} '
+ f'mon "allow r" '
+ f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
+ f'mds allow')
self.mount_a.remount(cephfs_name=self.fs.name)
PERM = 'rw'
FS_AUTH_CAPS = (('/', PERM),)
@@ -1303,7 +1387,7 @@ class TestFsAuthorize(CephFSTestCase):
self.run_cap_test_one_by_one(FS_AUTH_CAPS)
def run_cap_test_one_by_one(self, fs_auth_caps):
- keyring = self.run_cluster_cmd(f'auth get {self.client_name}')
+ keyring = self.run_ceph_cmd(f'auth get {self.client_name}')
for i, c in enumerate(fs_auth_caps):
self.assertIn(i, (0, 1))
PATH = c[0]
@@ -1315,7 +1399,7 @@ class TestFsAuthorize(CephFSTestCase):
def tearDown(self):
self.mount_a.umount_wait()
- self.run_cluster_cmd(f'auth rm {self.client_name}')
+ self.run_ceph_cmd(f'auth rm {self.client_name}')
super(type(self), self).tearDown()
@@ -1492,3 +1576,68 @@ class TestFsBalRankMask(CephFSTestCase):
self.fs.set_bal_rank_mask(bal_rank_mask)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
+
+
+class TestPermErrMsg(CephFSTestCase):
+
+ CLIENT_NAME = 'client.testuser'
+ FS1_NAME, FS2_NAME, FS3_NAME = 'abcd', 'efgh', 'ijkl'
+
+ EXPECTED_ERRNO = 22
+ EXPECTED_ERRMSG = ("Permission flags in MDS caps must start with 'r' or "
+ "'rw' or be '*' or 'all'")
+
+ MONCAP = f'allow r fsname={FS1_NAME}'
+ OSDCAP = f'allow rw tag cephfs data={FS1_NAME}'
+ MDSCAPS = [
+ 'allow w',
+ f'allow w fsname={FS1_NAME}',
+
+ f'allow rw fsname={FS1_NAME}, allow w fsname={FS2_NAME}',
+ f'allow w fsname={FS1_NAME}, allow rw fsname={FS2_NAME}',
+ f'allow w fsname={FS1_NAME}, allow w fsname={FS2_NAME}',
+
+ (f'allow rw fsname={FS1_NAME}, allow rw fsname={FS2_NAME}, allow '
+ f'w fsname={FS3_NAME}'),
+
+ # without space after comma
+ f'allow rw fsname={FS1_NAME},allow w fsname={FS2_NAME}',
+
+
+ 'allow wr',
+ f'allow wr fsname={FS1_NAME}',
+
+ f'allow rw fsname={FS1_NAME}, allow wr fsname={FS2_NAME}',
+ f'allow wr fsname={FS1_NAME}, allow rw fsname={FS2_NAME}',
+ f'allow wr fsname={FS1_NAME}, allow wr fsname={FS2_NAME}',
+
+ (f'allow rw fsname={FS1_NAME}, allow rw fsname={FS2_NAME}, allow '
+ f'wr fsname={FS3_NAME}'),
+
+ # without space after comma
+ f'allow rw fsname={FS1_NAME},allow wr fsname={FS2_NAME}']
+
+ def _negtestcmd(self, SUBCMD, MDSCAP):
+ return self.negtest_ceph_cmd(
+ args=(f'{SUBCMD} {self.CLIENT_NAME} '
+ f'mon "{self.MONCAP}" osd "{self.OSDCAP}" mds "{MDSCAP}"'),
+ retval=self.EXPECTED_ERRNO, errmsgs=self.EXPECTED_ERRMSG)
+
+ def test_auth_add(self):
+ for mdscap in self.MDSCAPS:
+ self._negtestcmd('auth add', mdscap)
+
+ def test_auth_get_or_create(self):
+ for mdscap in self.MDSCAPS:
+ self._negtestcmd('auth get-or-create', mdscap)
+
+ def test_auth_get_or_create_key(self):
+ for mdscap in self.MDSCAPS:
+ self._negtestcmd('auth get-or-create-key', mdscap)
+
+ def test_fs_authorize(self):
+ for wrong_perm in ('w', 'wr'):
+ self.negtest_ceph_cmd(
+ args=(f'fs authorize {self.fs.name} {self.CLIENT_NAME} / '
+ f'{wrong_perm}'), retval=self.EXPECTED_ERRNO,
+ errmsgs=self.EXPECTED_ERRMSG)
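
Note for readers: apart from the two new test classes, the bulk of this diff mechanically swaps direct `self.fs.mon_manager.raw_cluster_cmd(...)` calls for the `run_ceph_cmd()`, `get_ceph_cmd_stdout()` and `negtest_ceph_cmd()` helpers. A rough sketch of the contracts the tests rely on is given below; the class name and method bodies are illustrative assumptions, not the actual mixin implementation:

    from io import StringIO

    class RunCephCmdSketch:
        # Sketch only (assumed shape): accepts either separate words,
        # run_ceph_cmd('fs', 'new', ...), or a single string/list,
        # run_ceph_cmd('fs new ...'), and returns the finished process.
        def run_ceph_cmd(self, *args, **kwargs):
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]  # unwrap a lone string or list
                kwargs['args'] = args
            return self.mon_manager.run_cluster_cmd(**kwargs)

        # Sketch only (assumed shape): like run_ceph_cmd(), but captures and
        # returns stdout as a string, which is why the tests above feed the
        # result straight into json.loads().
        def get_ceph_cmd_stdout(self, *args, **kwargs):
            kwargs['stdout'] = kwargs.pop('stdout', StringIO())
            return self.run_ceph_cmd(*args, **kwargs).stdout.getvalue()

In the same spirit, `negtest_ceph_cmd(args=..., retval=..., errmsgs=...)` (used by TestPermErrMsg) is assumed to run a command that is expected to fail, asserting both the exit status and that each expected message appears on stderr.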