path: root/qa/tasks/rados.py
blob: a730a72993c246f0313acc757b3e5b28a8e12124 (plain)
"""
Rados model-based integration tests
"""
import contextlib
import logging
import gevent
from teuthology import misc as teuthology

from teuthology.orchestra import run

log = logging.getLogger(__name__)

@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        rados:
          clients: [client list]
          ops: <number of ops>
          objects: <number of objects to use>
          max_in_flight: <max number of operations in flight>
          object_size: <size of objects in bytes>
          min_stride_size: <minimum write stride size in bytes>
          max_stride_size: <maximum write stride size in bytes>
          op_weights: <dictionary mapping operation type to integer weight>
          runs: <number of times to run> - the pool is remade between runs
          ec_pool: use an ec pool
          erasure_code_profile: profile to use with the erasure coded pool
          fast_read: enable ec_pool's fast_read
          min_size: set the min_size of the created pool
          pool_snaps: use pool snapshots instead of self-managed snapshots
          write_fadvise_dontneed: write with the LIBRADOS_OP_FLAG_FADVISE_DONTNEED flag,
                                  indicating the data will not be accessed again soon,
                                  so the OSD backend need not keep it in its cache

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            ops: 1000
            max_seconds: 0   # 0 for no limit
            objects: 25
            max_in_flight: 16
            object_size: 4000000
            min_stride_size: 1024
            max_stride_size: 4096
            op_weights:
              read: 20
              write: 10
              delete: 2
              snap_create: 3
              rollback: 2
              snap_remove: 0
            ec_pool: true                      # create an ec pool, defaults to false
            erasure_code_use_overwrites: true  # test overwrites, defaults to false
            erasure_code_profile:
              name: teuthologyprofile
              k: 2
              m: 1
              crush-failure-domain: osd
            pool_snaps: true
            write_fadvise_dontneed: true
            runs: 10
        - interactive:

    Optionally, you can provide existing pool names to run against:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]
            ...

    Alternatively, you can provide a pool prefix:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo
            ...

    The tests are run asynchronously; they are not complete when the task
    returns. For instance:

        - rados:
            clients: [client.0]
            pools: [ecbase]
            ops: 4000
            objects: 500
            op_weights:
              read: 100
              write: 100
              delete: 50
              copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins, not
    after it completes. To make the rados task a blocking / sequential
    task, use:

        - sequential:
          - rados:
              clients: [client.0]
              pools: [ecbase]
              ops: 4000
              objects: 500
              op_weights:
                read: 100
                write: 100
                delete: 50
                copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
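    # Translate config options into ceph_test_rados command-line flags.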
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('set_redirect', False):
        args.extend(['--set_redirect'])
    if config.get('set_chunk', False):
        args.extend(['--set_chunk'])
    if config.get('enable_dedup', False):
        args.extend(['--enable_dedup'])
    if config.get('low_tier_pool', None):
        args.extend(['--low_tier_pool', config.get('low_tier_pool', None)])
    if config.get('dedup_chunk_size', False):
        args.extend(['--dedup_chunk_size', config.get('dedup_chunk_size', None)])
    if config.get('dedup_chunk_algo', False):
        args.extend(['--dedup_chunk_algo', config.get('dedup_chunk_algo', None)])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
    if config.get('balance_reads', False):
        args.extend(['--balance-reads'])
    if config.get('localize_reads', False):
        args.extend(['--localize-reads'])
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
        ])

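    # Default op weights; values supplied in op_weights override these.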
    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
        # read handled above
        # write handled above
        # delete handled above
        "snap_create",
        "snap_remove",
        "rollback",
        "setattr",
        "rmattr",
        "watch",
        "copy_from",
        "hit_set_list",
        "is_dirty",
        "undirty",
        "cache_flush",
        "cache_try_flush",
        "cache_evict",
        "append",
        "write",
        "read",
        "delete",
        "set_chunk",
        "tier_promote",
        "tier_evict",
        "tier_promote",
        "tier_flush"
        ]:
        if field in op_weights:
            weights[field] = op_weights[field]

    if config.get('write_append_excl', True):
        if 'write' in weights:
            weights['write'] = weights['write'] // 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] // 2
            weights['append_excl'] = weights['append']

    for op, weight in weights.items():
        args.extend([
            '--op', op, str(weight)
        ])

    def thread():
        """Thread spawned by gevent"""
        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s' % clients)
        manager = ctx.managers['ceph']
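        # Create the erasure-code profile once; pools created below refer to it by name.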
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, str)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

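                # Pick a pool for this client: reuse one from the configured
                # 'pools' list when possible, otherwise create a uniquely
                # named pool (erasure-coded if a profile was set up above).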
                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                else:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=
                          config.get('erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

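                # Start ceph_test_rados on the client's remote host without
                # waiting, so all clients in this run proceed in parallel;
                # run.wait() below joins them.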
                (remote,) = ctx.cluster.only(role).remotes.keys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                    )
                tests[id_] = proc
            run.wait(tests.values())

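            # Remove pools created for this run once snap trimming completes.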
            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

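    # Run the workload in a greenlet so this task yields immediately; the
    # greenlet is joined (and errors re-raised) in the finally block below.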
    running = gevent.spawn(thread)

    try:
        yield
    finally:
        log.info('joining rados')
        running.get()