path: root/qa/tasks/cephfs/test_flush.py

from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO


class TestFlush(CephFSTestCase):
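    """
    Check that the 'flush journal' admin socket command writes dirty
    metadata back to RADOS, trims the journal, and that purges of
    deleted files then proceed as expected.
    """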
    def test_flush(self):
        self.mount_a.run_shell(["mkdir", "mydir"])
        self.mount_a.run_shell(["touch", "mydir/alpha"])
        dir_ino = self.mount_a.path_to_ino("mydir")
        file_ino = self.mount_a.path_to_ino("mydir/alpha")

        # Unmount the client so that it isn't still holding caps
        self.mount_a.umount_wait()

        # Before flush, the dirfrag object does not exist
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)

        # Before flush, the file's backtrace has not been written
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)

        # Before flush, there are no dentries in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])

        # Execute flush
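        # ('flush journal' makes the MDS write back all dirty metadata --
        # dirfrags to the metadata pool, backtraces to the data pool --
        # and then trim the journal down to a fresh log segment)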
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # After flush, the dirfrag object has been created
        dir_list = self.fs.list_dirfrag(dir_ino)
        self.assertEqual(dir_list, ["alpha_head"])
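        # (dentries are stored as omap keys of the form '<name>_<snapid>'
        # in the dirfrag object; 'head' denotes the live, non-snapshot
        # version of the dentry)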

        # And the 'mydir' dentry is in the root
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head'])

        # ...and the data object has its backtrace
        backtrace = self.fs.read_backtrace(file_ino)
        self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']])
        self.assertEqual([dir_ino, ROOT_INO], [a['dirino'] for a in backtrace['ancestors']])
        self.assertEqual(file_ino, backtrace['ino'])
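        # (the backtrace is stored in the 'parent' xattr of the file's
        # first data object and lists its ancestors from the file up to
        # the root)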

        # ...and the journal is truncated to just a single subtreemap from the
        # newly created segment
        summary_output = self.fs.journal_tool(["event", "get", "summary"], 0)
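        # (the trailing 0 is the rank of the MDS whose journal we inspect)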
        try:
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                 Errors: 0
                                 """
                             ).strip())
        except AssertionError:
            # In some states, flushing the journal will leave behind an
            # extra event from locks that a client held.  This is correct
            # behaviour: the MDS is flushing the journal, it's just that
            # new events are being appended at the same time.  In that
            # case, a second flush should leave the journal containing
            # only the subtree map.
            self.assertEqual(summary_output,
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                   UPDATE: 1
                                 Errors: 0
                                 """
                             ).strip())
            flush_data = self.fs.mds_asok(["flush", "journal"])
            self.assertEqual(flush_data['return_code'], 0)
            self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0),
                             dedent(
                                 """
                                 Events by type:
                                   SUBTREEMAP: 1
                                 Errors: 0
                                 """
                             ).strip())

        # Now for deletion!
        # We will count the RADOS deletions and MDS file purges to verify
        # that the expected cleanup happens as a result of the purge.
        initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete']
        initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']
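        # ('osdop_delete' counts RADOS delete operations issued by the MDS;
        # 'strays_enqueued' counts unlinked inodes handed off to the purge
        # machinery)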

        # Use a client to delete a file
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.mount_a.run_shell(["rm", "-rf", "mydir"])

        # Flush the journal so that the directory inode can be purged
        flush_data = self.fs.mds_asok(["flush", "journal"])
        self.assertEqual(flush_data['return_code'], 0)

        # We expect two purges: one of the file and one of the directory
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] - initial_purges >= 2,
            60)

        # We expect two RADOS deletions: one of the dirfrag object and one
        # of the file's data object (which carried the backtrace)
        self.wait_until_true(
            lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2,
            60)  # timeout is fairly long to allow for tick+rados latencies

        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(dir_ino)
        with self.assertRaises(ObjectNotFound):
            self.fs.read_backtrace(file_ino)
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])