from io import StringIO
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from textwrap import dedent
from threading import Thread
import errno
import platform
import time
import json
import logging
import os
import re

log = logging.getLogger(__name__)


class TestMisc(CephFSTestCase):
CLIENTS_REQUIRED = 2
def test_statfs_on_deleted_fs(self):
"""
That statfs does not cause monitors to SIGSEGV after fs deletion.
"""
self.mount_b.umount_wait()
self.mount_a.run_shell_payload("stat -f .")
self.fs.delete_all_filesystems()
# This will hang either way, so run it in the background.
p = self.mount_a.run_shell_payload("stat -f .", wait=False, timeout=60, check_status=False)
time.sleep(30)
self.assertFalse(p.finished)
# the process is stuck in uninterruptible sleep, just kill the mount
self.mount_a.umount_wait(force=True)
p.wait()
def test_fuse_mount_on_already_mounted_path(self):
if platform.system() != "Linux":
self.skipTest("Require Linux platform")
if not isinstance(self.mount_a, FuseMount):
self.skipTest("Require FUSE client")
# Try to mount an already-mounted path, expecting an EBUSY error
try:
mount_cmd = ['sudo'] + self.mount_a._mount_bin + [self.mount_a.hostfs_mntpt]
self.mount_a.client_remote.run(args=mount_cmd, stderr=StringIO(),
stdout=StringIO(), timeout=60, omit_sudo=False)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EBUSY)
else:
self.fail("Expected EBUSY")
def test_getattr_caps(self):
"""
Check that the MDS recognizes the 'mask' parameter of the open request.
The parameter allows the client to request caps when opening a file.
"""
if not isinstance(self.mount_a, FuseMount):
self.skipTest("Require FUSE client")
# Enable debug. The client will request CEPH_CAP_XATTR_SHARED
# on lookup/open
self.mount_b.umount_wait()
self.set_conf('client', 'client debug getattr caps', 'true')
self.mount_b.mount_wait()
# create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
# to mount_a
p = self.mount_a.open_background("testfile")
self.mount_b.wait_for_visible("testfile")
# this triggers a lookup request and an open request. The debug
# code will check whether the lookup/open reply contains xattrs
self.mount_b.run_shell(["cat", "testfile"])
self.mount_a.kill_background(p)
def test_root_rctime(self):
"""
Check that the root inode has a non-default rctime on startup.
"""
t = time.time()
rctime = self.mount_a.getfattr(".", "ceph.dir.rctime")
log.info("rctime = {}".format(rctime))
self.assertGreaterEqual(float(rctime), t - 10)
def test_fs_new(self):
self.mount_a.umount_wait()
self.mount_b.umount_wait()
data_pool_name = self.fs.get_data_pool_name()
self.fs.fail()
self.run_ceph_cmd('fs', 'rm', self.fs.name, '--yes-i-really-mean-it')
self.run_ceph_cmd('osd', 'pool', 'delete',
self.fs.metadata_pool_name,
self.fs.metadata_pool_name,
'--yes-i-really-really-mean-it')
self.run_ceph_cmd('osd', 'pool', 'create',
self.fs.metadata_pool_name,
'--pg_num_min', str(self.fs.pg_num_min))
# insert a garbage object
self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
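# Helper for wait_until_true() below: report whether the metadata pool
# stats show at least one object yet; a RuntimeError while the stats are
# still settling just counts as "not there yet".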
def get_pool_df(fs, name):
try:
return fs.get_pool_df(name)['objects'] > 0
except RuntimeError:
return False
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
try:
self.run_ceph_cmd('fs', 'new', self.fs.name,
self.fs.metadata_pool_name,
data_pool_name)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
raise AssertionError("Expected EINVAL")
self.run_ceph_cmd('fs', 'new', self.fs.name,
self.fs.metadata_pool_name,
data_pool_name, "--force")
self.run_ceph_cmd('fs', 'fail', self.fs.name)
self.run_ceph_cmd('fs', 'rm', self.fs.name,
'--yes-i-really-mean-it')
self.run_ceph_cmd('osd', 'pool', 'delete',
self.fs.metadata_pool_name,
self.fs.metadata_pool_name,
'--yes-i-really-really-mean-it')
self.run_ceph_cmd('osd', 'pool', 'create',
self.fs.metadata_pool_name,
'--pg_num_min', str(self.fs.pg_num_min))
self.run_ceph_cmd('fs', 'new', self.fs.name,
self.fs.metadata_pool_name,
data_pool_name,
'--allow_dangerous_metadata_overlay')
def test_cap_revoke_nonresponder(self):
"""
Check that a client is evicted if it has not responded to a cap revoke
request for the configured number of seconds.
"""
session_timeout = self.fs.get_var("session_timeout")
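# Use an eviction timeout well below the session timeout so that the
# cap-revoke auto-evicter fires before the stale-session path would
# (the assertion further down relies on this ordering).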
eviction_timeout = session_timeout / 2.0
self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout',
str(eviction_timeout)])
cap_holder = self.mount_a.open_background()
# Wait for the file to be visible from another client, indicating
# that mount_a has completed its network ops
self.mount_b.wait_for_visible()
# Simulate client death
self.mount_a.suspend_netns()
try:
# The waiter should get stuck waiting for the capability
# held on the MDS by the now-dead client A
cap_waiter = self.mount_b.write_background()
a = time.time()
time.sleep(eviction_timeout)
cap_waiter.wait()
b = time.time()
cap_waited = b - a
log.info("cap_waiter waited {0}s".format(cap_waited))
# check that the cap is transferred before the session timeout kicks in.
# this is a good enough check to ensure that the client got evicted
# by the cap auto-evicter rather than transitioning to the stale state
# and then getting evicted.
self.assertLess(cap_waited, session_timeout,
"Capability handover took {0}, expected less than {1}".format(
cap_waited, session_timeout
))
self.assertTrue(self.mds_cluster.is_addr_blocklisted(
self.mount_a.get_global_addr()))
self.mount_a._kill_background(cap_holder)
finally:
self.mount_a.resume_netns()
def test_filtered_df(self):
pool_name = self.fs.get_data_pool_name()
raw_df = self.fs.get_pool_df(pool_name)
raw_avail = float(raw_df["max_avail"])
out = self.get_ceph_cmd_stdout('osd', 'pool', 'get', pool_name,
'size', '-f', 'json-pretty')
_ = json.loads(out)
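# The available space reported by `df` on the mount (in KiB) should agree
# with the data pool's max_avail to within about 10%.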
proc = self.mount_a.run_shell(['df', '.'])
output = proc.stdout.getvalue()
fs_avail = output.split('\n')[1].split()[3]
fs_avail = float(fs_avail) * 1024
ratio = raw_avail / fs_avail
self.assertTrue(0.9 < ratio < 1.1)
def test_dump_inode(self):
info = self.fs.mds_asok(['dump', 'inode', '1'])
self.assertEqual(info['path'], "/")
def test_dump_inode_hexademical(self):
self.mount_a.run_shell(["mkdir", "-p", "foo"])
ino = self.mount_a.path_to_ino("foo")
self.assertTrue(type(ino) is int)
info = self.fs.mds_asok(['dump', 'inode', hex(ino)])
self.assertEqual(info['path'], "/foo")
def test_dump_dir(self):
self.mount_a.run_shell(["mkdir", "-p", "foo/bar"])
dirs = self.fs.mds_asok(['dump', 'dir', '/foo'])
self.assertTrue(type(dirs) is list)
for dir in dirs:
self.assertEqual(dir['path'], "/foo")
self.assertFalse("dentries" in dir)
dirs = self.fs.mds_asok(['dump', 'dir', '/foo', '--dentry_dump'])
self.assertTrue(type(dirs) is list)
found_dentry = False
for dir in dirs:
self.assertEqual(dir['path'], "/foo")
self.assertTrue(type(dir['dentries']) is list)
if found_dentry:
continue
for dentry in dir['dentries']:
if dentry['path'] == "foo/bar":
found_dentry = True
break
self.assertTrue(found_dentry)
def test_fs_lsflags(self):
"""
Check that `fs lsflags` displays the default state and the new state of the flags.
"""
# Set some flags
self.fs.set_joinable(False)
self.fs.set_allow_new_snaps(False)
self.fs.set_allow_standby_replay(True)
lsflags = json.loads(self.get_ceph_cmd_stdout(
'fs', 'lsflags', self.fs.name, "--format=json-pretty"))
self.assertEqual(lsflags["joinable"], False)
self.assertEqual(lsflags["allow_snaps"], False)
self.assertEqual(lsflags["allow_multimds_snaps"], True)
self.assertEqual(lsflags["allow_standby_replay"], True)
def _test_sync_stuck_for_around_5s(self, dir_path, file_sync=False):
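# Repeatedly create and remove batches of directories, checking that each
# sync/fsync call returns within the 4-second run_shell timeout rather
# than blocking on a full mdlog flush.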
self.mount_a.run_shell(["mkdir", dir_path])
sync_dir_pyscript = dedent("""
import os
path = "{path}"
dfd = os.open(path, os.O_DIRECTORY)
os.fsync(dfd)
os.close(dfd)
""".format(path=dir_path))
# run create/delete directories and test the sync time duration
for i in range(300):
for j in range(5):
self.mount_a.run_shell(["mkdir", os.path.join(dir_path, f"{i}_{j}")])
start = time.time()
if file_sync:
self.mount_a.run_shell(['python3', '-c', sync_dir_pyscript], timeout=4)
else:
self.mount_a.run_shell(["sync"], timeout=4)
# the real sync duration should be less than this rough measurement
duration = time.time() - start
log.info(f"sync mkdir i = {i}, rough duration = {duration}")
for j in range(5):
self.mount_a.run_shell(["rm", "-rf", os.path.join(dir_path, f"{i}_{j}")])
start = time.time()
if file_sync:
self.mount_a.run_shell(['python3', '-c', sync_dir_pyscript], timeout=4)
else:
self.mount_a.run_shell(["sync"], timeout=4)
# the real sync duration should be less than this rough measurement
duration = time.time() - start
log.info(f"sync rmdir i = {i}, rough duration = {duration}")
self.mount_a.run_shell(["rm", "-rf", dir_path])
def test_filesystem_sync_stuck_for_around_5s(self):
"""
Check that a filesystem sync blocks waiting for the mdlog to be
flushed for at most about 5 seconds.
"""
dir_path = "filesystem_sync_do_not_wait_mdlog_testdir"
self._test_sync_stuck_for_around_5s(dir_path)
def test_file_sync_stuck_for_around_5s(self):
"""
Check that an fsync blocks waiting for the mdlog to be flushed for
at most about 5 seconds.
"""
dir_path = "file_sync_do_not_wait_mdlog_testdir"
self._test_sync_stuck_for_around_5s(dir_path, True)
def test_file_filesystem_sync_crash(self):
"""
Check that the kernel does not crash while doing file/filesystem sync.
"""
stop_thread = False
dir_path = "file_filesystem_sync_crash_testdir"
self.mount_a.run_shell(["mkdir", dir_path])
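# Run three workers concurrently for 20 minutes: one churns directories,
# one issues filesystem-wide sync, and one fsyncs the test directory;
# stop_thread (read by the closures below) shuts them all down.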
def mkdir_rmdir_thread(mount, path):
#global stop_thread
log.info(" mkdir_rmdir_thread starting...")
num = 0
while not stop_thread:
n = num
m = num
for __ in range(10):
mount.run_shell(["mkdir", os.path.join(path, f"{n}")])
n += 1
for __ in range(10):
mount.run_shell(["rm", "-rf", os.path.join(path, f"{m}")])
m += 1
num += 10
log.info(" mkdir_rmdir_thread stopped")
def filesystem_sync_thread(mount, path):
#global stop_thread
log.info(" filesystem_sync_thread starting...")
while not stop_thread:
mount.run_shell(["sync"])
log.info(" filesystem_sync_thread stopped")
def file_sync_thread(mount, path):
#global stop_thread
log.info(" file_sync_thread starting...")
pyscript = dedent("""
import os
path = "{path}"
dfd = os.open(path, os.O_DIRECTORY)
os.fsync(dfd)
os.close(dfd)
""".format(path=path))
while not stop_thread:
mount.run_shell(['python3', '-c', pyscript])
log.info(" file_sync_thread stopped")
td1 = Thread(target=mkdir_rmdir_thread, args=(self.mount_a, dir_path,))
td2 = Thread(target=filesystem_sync_thread, args=(self.mount_a, dir_path,))
td3 = Thread(target=file_sync_thread, args=(self.mount_a, dir_path,))
td1.start()
td2.start()
td3.start()
time.sleep(1200) # run 20 minutes
stop_thread = True
td1.join()
td2.join()
td3.join()
self.mount_a.run_shell(["rm", "-rf", dir_path])
def test_dump_inmemory_log_on_client_eviction(self):
"""
That the in-memory logs are dumped during a client eviction event.
"""
self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
mount_a_client_id = self.mount_a.get_global_id()
infos = self.fs.status().get_ranks(self.fs.id)
#evict the client
self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
time.sleep(10) #wait for 10 seconds for the logs dumping to complete.
#The client is evicted, so unmount it.
try:
self.mount_a.umount_wait(require_clean=True, timeout=30)
except:
pass #continue with grepping the log
eviction_log = rf"Evicting (\(and blocklisting\) )?client session {mount_a_client_id} \(.+:.+/.+\)"
search_range = "/^--- begin dump of recent events ---$/,/^--- end dump of recent events ---$/p"
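# The in-memory dump is bracketed by begin/end markers in the MDS log;
# extract that region with sed on each MDS host and search it for the
# eviction message.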
for info in infos:
mds_id = info['name']
try:
remote = self.fs.mon_manager.find_remote('mds', mds_id)
out = remote.run(args=["sed",
"-n",
"{0}".format(search_range),
f"/var/log/ceph/{self.mount_a.cluster_name}-mds.{mds_id}.log"],
stdout=StringIO(), timeout=30)
except:
continue #continue with the next info
if out.stdout and re.search(eviction_log, out.stdout.getvalue().strip()):
return
self.assertTrue(False, "Failed to dump in-memory logs during client eviction")
def test_dump_inmemory_log_on_missed_beacon_ack_from_monitors(self):
"""
That the in-memory logs are dumped when the mds misses beacon ACKs from monitors.
"""
self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
#Freeze all monitors
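# SIGSTOP (19) freezes the monitors so beacon ACKs stop flowing to the
# MDS; SIGCONT (18) below resumes them.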
for mon in mons:
mon_name = mon['name']
log.info(f'Sending STOP to mon {mon_name}')
self.fs.mon_manager.signal_mon(mon_name, 19)
time.sleep(10) #wait for 10 seconds to get the in-memory logs dumped
#Unfreeze all monitors
for mon in mons:
mon_name = mon['name']
log.info(f'Sending CONT to mon {mon_name}')
self.fs.mon_manager.signal_mon(mon_name, 18)
missed_beacon_ack_log = "missed beacon ack from the monitors"
search_range = "/^--- begin dump of recent events ---$/,/^--- end dump of recent events ---$/p"
for info in self.fs.status().get_ranks(self.fs.id):
mds_id = info['name']
try:
remote = self.fs.mon_manager.find_remote('mds', mds_id)
out = remote.run(args=["sed",
"-n",
"{0}".format(search_range),
f"/var/log/ceph/{self.mount_a.cluster_name}-mds.{mds_id}.log"],
stdout=StringIO(), timeout=30)
except:
continue #continue with the next info
if out.stdout and (missed_beacon_ack_log in out.stdout.getvalue().strip()):
return
self.assertTrue(False, "Failed to dump in-memory logs during missed beacon ack")
def test_dump_inmemory_log_on_missed_internal_heartbeats(self):
"""
That the in-memory logs are dumped when the mds misses internal heartbeats.
"""
self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
self.fs.mds_asok(['config', 'set', 'mds_heartbeat_grace', '1'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
#Freeze all monitors
for mon in mons:
mon_name = mon['name']
log.info(f'Sending STOP to mon {mon_name}')
self.fs.mon_manager.signal_mon(mon_name, 19)
time.sleep(10) #wait for 10 seconds to get the in-memory logs dumped
#Unfreeze all monitors
for mon in mons:
mon_name = mon['name']
log.info(f'Sending CONT to mon {mon_name}')
self.fs.mon_manager.signal_mon(mon_name, 18)
missed_internal_heartbeat_log = \
r"Skipping beacon heartbeat to monitors \(last acked .+s ago\); MDS internal heartbeat is not healthy!"
search_range = "/^--- begin dump of recent events ---$/,/^--- end dump of recent events ---$/p"
for info in self.fs.status().get_ranks(self.fs.id):
mds_id = info['name']
try:
remote = self.fs.mon_manager.find_remote('mds', mds_id)
out = remote.run(args=["sed",
"-n",
"{0}".format(search_range),
f"/var/log/ceph/{self.mount_a.cluster_name}-mds.{mds_id}.log"],
stdout=StringIO(), timeout=30)
except:
continue #continue with the next info
if out.stdout and re.search(missed_internal_heartbeat_log, out.stdout.getvalue().strip()):
return
self.assertTrue(False, "Failed to dump in-memory logs during missed internal heartbeat")
def _session_client_ls(self, cmd):
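# Exercise the given listing command three ways: unfiltered, filtered by
# a single client id, and with --cap_dump; verify the mount_point metadata
# and, for --cap_dump, that each entry carries a "caps" section.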
mount_a_client_id = self.mount_a.get_global_id()
info = self.fs.rank_asok(cmd)
mount_a_mountpoint = self.mount_a.mountpoint
mount_b_mountpoint = self.mount_b.mountpoint
self.assertIsNotNone(info)
for i in range(0, len(info)):
self.assertIn(info[i]["client_metadata"]["mount_point"],
[mount_a_mountpoint, mount_b_mountpoint])
info = self.fs.rank_asok(cmd + [f"id={mount_a_client_id}"])
self.assertEqual(len(info), 1)
self.assertEqual(info[0]["id"], mount_a_client_id)
self.assertEqual(info[0]["client_metadata"]["mount_point"], mount_a_mountpoint)
info = self.fs.rank_asok(cmd + ['--cap_dump'])
for i in range(0, len(info)):
self.assertIn("caps", info[i])
def test_session_ls(self):
self._session_client_ls(['session', 'ls'])
def test_client_ls(self):
self._session_client_ls(['client', 'ls'])


class TestCacheDrop(CephFSTestCase):
CLIENTS_REQUIRED = 1
def _run_drop_cache_cmd(self, timeout=None):
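# Issue "cache drop [timeout]" to the rank via the tell interface and
# return the parsed result.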
result = None
args = ["cache", "drop"]
if timeout is not None:
args.append(str(timeout))
result = self.fs.rank_tell(args)
return result
def _setup(self, max_caps=20, threshold=400):
# create some files
self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True)
# Reduce this so the MDS doesn't recall the maximum for simple tests
self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)])
self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)])
def test_drop_cache_command(self):
"""
Basic test for checking drop cache command.
Confirm it halts without a timeout.
Note that the cache size post trimming is not checked here.
"""
mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
self._setup()
result = self._run_drop_cache_cmd()
self.assertEqual(result['client_recall']['return_code'], 0)
self.assertEqual(result['flush_journal']['return_code'], 0)
# It should take at least 1 second
self.assertGreater(result['duration'], 1)
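# Expect nearly all of the 1000 created inodes to be trimmed, allowing
# slack of twice mds_min_caps_per_client for caps the client may retain.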
self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000-2*mds_min_caps_per_client)
def test_drop_cache_command_timeout(self):
"""
Basic test for checking drop cache command.
Confirm recall halts early via a timeout.
Note that the cache size post trimming is not checked here.
"""
self._setup()
result = self._run_drop_cache_cmd(timeout=10)
self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
self.assertEqual(result['flush_journal']['return_code'], 0)
self.assertGreater(result['duration'], 10)
self.assertGreaterEqual(result['trim_cache']['trimmed'], 100) # we did something, right?
def test_drop_cache_command_dead_timeout(self):
"""
Check drop cache command with non-responding client using tell
interface. Note that the cache size post trimming is not checked
here.
"""
self._setup()
self.mount_a.suspend_netns()
# Note: recall is subject to the timeout. The journal flush will
# be delayed due to the client being dead.
result = self._run_drop_cache_cmd(timeout=5)
self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT)
self.assertEqual(result['flush_journal']['return_code'], 0)
self.assertGreater(result['duration'], 5)
self.assertLess(result['duration'], 120)
# Note: result['trim_cache']['trimmed'] may be >0 because dropping the
# cache now causes the Locker to drive eviction of stale clients (a
# stale session will be autoclosed at mdsmap['session_timeout']). The
# particular operation causing this is the journal flush, which causes
# the MDS to wait for cap revokes.
#self.assertEqual(0, result['trim_cache']['trimmed'])
self.mount_a.resume_netns()
def test_drop_cache_command_dead(self):
"""
Check drop cache command with non-responding client using tell
interface. Note that the cache size post trimming is not checked
here.
"""
self._setup()
self.mount_a.suspend_netns()
result = self._run_drop_cache_cmd()
self.assertEqual(result['client_recall']['return_code'], 0)
self.assertEqual(result['flush_journal']['return_code'], 0)
self.assertGreater(result['duration'], 5)
self.assertLess(result['duration'], 120)
# Note: result['trim_cache']['trimmed'] may be >0 because dropping the
# cache now causes the Locker to drive eviction of stale clients (a
# stale session will be autoclosed at mdsmap['session_timeout']). The
# particular operation causing this is the journal flush, which causes
# the MDS to wait for cap revokes.
self.mount_a.resume_netns()


class TestSkipReplayInoTable(CephFSTestCase):
MDSS_REQUIRED = 1
CLIENTS_REQUIRED = 1
def test_alloc_cinode_assert(self):
"""
Test alloc CInode assert.
See: https://tracker.ceph.com/issues/52280
"""
# Create a directory; the mds will journal this and then crash
self.mount_a.run_shell(["rm", "-rf", "test_alloc_ino"])
self.mount_a.run_shell(["mkdir", "test_alloc_ino"])
status = self.fs.status()
rank0 = self.fs.get_rank(rank=0, status=status)
self.fs.mds_asok(['config', 'set', 'mds_kill_skip_replaying_inotable', "true"])
# This will make the MDS crash; since we only have one MDS in the
# cluster, without "wait=False" this call would get stuck here forever.
self.mount_a.run_shell(["mkdir", "test_alloc_ino/dir1"], wait=False)
# sleep 10 seconds to make sure the journal logs are flushed and
# the mds crashes
time.sleep(10)
# Now set the mds config to skip replaying the inotable
self.fs.set_ceph_conf('mds', 'mds_inject_skip_replaying_inotable', True)
self.fs.set_ceph_conf('mds', 'mds_wipe_sessions', True)
self.fs.mds_restart()
# sleep 5 seconds to make sure the mds tell command won't get stuck
time.sleep(5)
self.fs.wait_for_daemons()
self.delete_mds_coredump(rank0['name'])
self.mount_a.run_shell(["mkdir", "test_alloc_ino/dir2"])
ls_out = set(self.mount_a.ls("test_alloc_ino/"))
self.assertEqual(ls_out, set({"dir1", "dir2"}))