diff options
Diffstat (limited to 'src/test/pybind')
-rw-r--r-- | src/test/pybind/CMakeLists.txt | 2 | ||||
-rwxr-xr-x | src/test/pybind/test_ceph_argparse.py | 1343 | ||||
-rwxr-xr-x | src/test/pybind/test_ceph_daemon.py | 47 | ||||
-rw-r--r-- | src/test/pybind/test_cephfs.py | 434 | ||||
-rw-r--r-- | src/test/pybind/test_rados.py | 1114 | ||||
-rw-r--r-- | src/test/pybind/test_rbd.py | 2310 | ||||
-rw-r--r-- | src/test/pybind/test_rgwfs.py | 144 |
7 files changed, 5394 insertions, 0 deletions
diff --git a/src/test/pybind/CMakeLists.txt b/src/test/pybind/CMakeLists.txt new file mode 100644 index 00000000..bc91bce3 --- /dev/null +++ b/src/test/pybind/CMakeLists.txt @@ -0,0 +1,2 @@ +add_ceph_test(test_ceph_daemon.py ${CMAKE_CURRENT_SOURCE_DIR}/test_ceph_daemon.py) +add_ceph_test(test_ceph_argparse.py ${CMAKE_CURRENT_SOURCE_DIR}/test_ceph_argparse.py) diff --git a/src/test/pybind/test_ceph_argparse.py b/src/test/pybind/test_ceph_argparse.py new file mode 100755 index 00000000..85484377 --- /dev/null +++ b/src/test/pybind/test_ceph_argparse.py @@ -0,0 +1,1343 @@ +#!/usr/bin/env nosetests +# -*- mode:python; tab-width:4; indent-tabs-mode:t; coding:utf-8 -*- +# vim: ts=4 sw=4 smarttab expandtab fileencoding=utf-8 +# +# Ceph - scalable distributed file system +# +# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com> +# Copyright (C) 2014 Red Hat <contact@redhat.com> +# +# Author: Loic Dachary <loic@dachary.org> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# + +from nose.tools import eq_ as eq +from nose.tools import * +from unittest import TestCase + +from ceph_argparse import validate_command, parse_json_funcsigs, validate, \ + parse_funcsig, ArgumentError, ArgumentTooFew, ArgumentMissing, \ + ArgumentNumber, ArgumentValid + +import os +import random +import re +import string +import sys +import json +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +def get_command_descriptions(what): + CEPH_BIN = os.environ['CEPH_BIN'] + if CEPH_BIN == "": + CEPH_BIN = "." 
+ return os.popen(CEPH_BIN + "/get_command_descriptions " + "--" + what).read() + +def test_parse_json_funcsigs(): + commands = get_command_descriptions("all") + cmd_json = parse_json_funcsigs(commands, 'cli') + + # syntax error https://github.com/ceph/ceph/pull/585 + commands = get_command_descriptions("pull585") + assert_raises(TypeError, parse_json_funcsigs, commands, 'cli') + +sigdict = parse_json_funcsigs(get_command_descriptions("all"), 'cli') + + +class TestArgparse: + + def assert_valid_command(self, args): + result = validate_command(sigdict, args) + assert_not_in(result, [{}, None]) + + def check_1_natural_arg(self, prefix, command): + self.assert_valid_command([prefix, command, '1']) + assert_equal({}, validate_command(sigdict, [prefix, command])) + assert_equal({}, validate_command(sigdict, [prefix, command, '-1'])) + assert_equal({}, validate_command(sigdict, [prefix, command, '1', + '1'])) + + def check_0_or_1_natural_arg(self, prefix, command): + self.assert_valid_command([prefix, command, '1']) + self.assert_valid_command([prefix, command]) + assert_equal({}, validate_command(sigdict, [prefix, command, '-1'])) + assert_equal({}, validate_command(sigdict, [prefix, command, '1', + '1'])) + + def check_1_string_arg(self, prefix, command): + assert_equal({}, validate_command(sigdict, [prefix, command])) + self.assert_valid_command([prefix, command, 'string']) + assert_equal({}, validate_command(sigdict, [prefix, + command, + 'string', + 'toomany'])) + + def check_0_or_1_string_arg(self, prefix, command): + self.assert_valid_command([prefix, command, 'string']) + self.assert_valid_command([prefix, command]) + assert_equal({}, validate_command(sigdict, [prefix, command, 'string', + 'toomany'])) + + def check_1_or_more_string_args(self, prefix, command): + assert_equal({}, validate_command(sigdict, [prefix, + command])) + self.assert_valid_command([prefix, + command, + 'string']) + self.assert_valid_command([prefix, + command, + 'string', + 'more string']) 
+ + def check_no_arg(self, prefix, command): + self.assert_valid_command([prefix, + command]) + assert_equal({}, validate_command(sigdict, [prefix, + command, + 'toomany'])) + + def capture_output(self, args, stdout=None, stderr=None): + if stdout: + stdout = StringIO() + sys.stdout = stdout + if stderr: + stderr = StringIO() + sys.stderr = stderr + ret = validate_command(sigdict, args) + if stdout: + stdout = stdout.getvalue().strip() + if stderr: + stderr = stderr.getvalue().strip() + return ret, stdout, stderr + + +class TestBasic: + + def test_non_ascii_in_non_options(self): + # ArgumentPrefix("no match for {0}".format(s)) is not able to convert + # unicode str parameter into str. and validate_command() should not + # choke on it. + assert_equal({}, validate_command(sigdict, [u'章鱼和鱿鱼'])) + assert_equal({}, validate_command(sigdict, [u'–w'])) + # actually we always pass unicode strings to validate_command() in "ceph" + # CLI, but we also use bytestrings in our tests, so make sure it does not + # break. 
+ assert_equal({}, validate_command(sigdict, ['章鱼和鱿鱼'])) + assert_equal({}, validate_command(sigdict, ['–w'])) + + +class TestPG(TestArgparse): + + def test_stat(self): + self.assert_valid_command(['pg', 'stat']) + + def test_getmap(self): + self.assert_valid_command(['pg', 'getmap']) + + def test_dump(self): + self.assert_valid_command(['pg', 'dump']) + self.assert_valid_command(['pg', 'dump', + 'all', + 'summary', + 'sum', + 'delta', + 'pools', + 'osds', + 'pgs', + 'pgs_brief']) + assert_equal({}, validate_command(sigdict, ['pg', 'dump', 'invalid'])) + + def test_dump_json(self): + self.assert_valid_command(['pg', 'dump_json']) + self.assert_valid_command(['pg', 'dump_json', + 'all', + 'summary', + 'sum', + 'pools', + 'osds', + 'pgs']) + assert_equal({}, validate_command(sigdict, ['pg', 'dump_json', + 'invalid'])) + + def test_dump_pools_json(self): + self.assert_valid_command(['pg', 'dump_pools_json']) + + def test_dump_pools_stuck(self): + self.assert_valid_command(['pg', 'dump_stuck']) + self.assert_valid_command(['pg', 'dump_stuck', + 'inactive', + 'unclean', + 'stale']) + assert_equal({}, validate_command(sigdict, ['pg', 'dump_stuck', + 'invalid'])) + self.assert_valid_command(['pg', 'dump_stuck', + 'inactive', + '1234']) + + def one_pgid(self, command): + self.assert_valid_command(['pg', command, '1.1']) + assert_equal({}, validate_command(sigdict, ['pg', command])) + assert_equal({}, validate_command(sigdict, ['pg', command, '1'])) + + def test_map(self): + self.one_pgid('map') + + def test_scrub(self): + self.one_pgid('scrub') + + def test_deep_scrub(self): + self.one_pgid('deep-scrub') + + def test_repair(self): + self.one_pgid('repair') + + def test_debug(self): + self.assert_valid_command(['pg', + 'debug', + 'unfound_objects_exist']) + self.assert_valid_command(['pg', + 'debug', + 'degraded_pgs_exist']) + assert_equal({}, validate_command(sigdict, ['pg', 'debug'])) + assert_equal({}, validate_command(sigdict, ['pg', 'debug', + 'invalid'])) + + def 
test_pg_missing_args_output(self): + ret, _, stderr = self.capture_output(['pg'], stderr=True) + assert_equal({}, ret) + assert_regexp_matches(stderr, re.compile('no valid command found.* closest matches')) + + def test_pg_wrong_arg_output(self): + ret, _, stderr = self.capture_output(['pg', 'map', 'bad-pgid'], + stderr=True) + assert_equal({}, ret) + assert_in("Invalid command", stderr) + + +class TestAuth(TestArgparse): + + def test_export(self): + self.assert_valid_command(['auth', 'export']) + self.assert_valid_command(['auth', + 'export', + 'string']) + assert_equal({}, validate_command(sigdict, ['auth', + 'export', + 'string', + 'toomany'])) + + def test_get(self): + self.check_1_string_arg('auth', 'get') + + def test_get_key(self): + self.check_1_string_arg('auth', 'get-key') + + def test_print_key(self): + self.check_1_string_arg('auth', 'print-key') + self.check_1_string_arg('auth', 'print_key') + + def test_list(self): + self.check_no_arg('auth', 'list') + + def test_import(self): + self.check_no_arg('auth', 'import') + + def test_add(self): + self.check_1_or_more_string_args('auth', 'add') + + def test_get_or_create_key(self): + self.check_1_or_more_string_args('auth', 'get-or-create-key') + + def test_get_or_create(self): + self.check_1_or_more_string_args('auth', 'get-or-create') + + def test_caps(self): + assert_equal({}, validate_command(sigdict, ['auth', + 'caps'])) + assert_equal({}, validate_command(sigdict, ['auth', + 'caps', + 'string'])) + self.assert_valid_command(['auth', + 'caps', + 'string', + 'more string']) + + def test_del(self): + self.check_1_string_arg('auth', 'del') + + +class TestMonitor(TestArgparse): + + def test_compact(self): + self.assert_valid_command(['compact']) + + def test_scrub(self): + self.assert_valid_command(['scrub']) + + def test_fsid(self): + self.assert_valid_command(['fsid']) + + def test_log(self): + assert_equal({}, validate_command(sigdict, ['log'])) + self.assert_valid_command(['log', 'a logtext']) + 
self.assert_valid_command(['log', 'a logtext', 'and another']) + + def test_injectargs(self): + assert_equal({}, validate_command(sigdict, ['injectargs'])) + self.assert_valid_command(['injectargs', 'one']) + self.assert_valid_command(['injectargs', 'one', 'two']) + + def test_status(self): + self.assert_valid_command(['status']) + + def test_health(self): + self.assert_valid_command(['health']) + self.assert_valid_command(['health', 'detail']) + assert_equal({}, validate_command(sigdict, ['health', 'invalid'])) + assert_equal({}, validate_command(sigdict, ['health', 'detail', + 'toomany'])) + + def test_df(self): + self.assert_valid_command(['df']) + self.assert_valid_command(['df', 'detail']) + assert_equal({}, validate_command(sigdict, ['df', 'invalid'])) + assert_equal({}, validate_command(sigdict, ['df', 'detail', + 'toomany'])) + + def test_report(self): + self.assert_valid_command(['report']) + self.assert_valid_command(['report', 'tag1']) + self.assert_valid_command(['report', 'tag1', 'tag2']) + + def test_quorum_status(self): + self.assert_valid_command(['quorum_status']) + + def test_mon_status(self): + self.assert_valid_command(['mon_status']) + + def test_sync_force(self): + self.assert_valid_command(['sync', + 'force', + '--yes-i-really-mean-it', + '--i-know-what-i-am-doing']) + self.assert_valid_command(['sync', + 'force', + '--yes-i-really-mean-it']) + self.assert_valid_command(['sync', + 'force']) + assert_equal({}, validate_command(sigdict, ['sync'])) + assert_equal({}, validate_command(sigdict, ['sync', + 'force', + '--yes-i-really-mean-it', + '--i-know-what-i-am-doing', + 'toomany'])) + + def test_heap(self): + assert_equal({}, validate_command(sigdict, ['heap'])) + assert_equal({}, validate_command(sigdict, ['heap', 'invalid'])) + self.assert_valid_command(['heap', 'dump']) + self.assert_valid_command(['heap', 'start_profiler']) + self.assert_valid_command(['heap', 'stop_profiler']) + self.assert_valid_command(['heap', 'release']) + 
self.assert_valid_command(['heap', 'stats']) + + def test_quorum(self): + assert_equal({}, validate_command(sigdict, ['quorum'])) + assert_equal({}, validate_command(sigdict, ['quorum', 'invalid'])) + self.assert_valid_command(['quorum', 'enter']) + self.assert_valid_command(['quorum', 'exit']) + assert_equal({}, validate_command(sigdict, ['quorum', + 'enter', + 'toomany'])) + + def test_tell(self): + assert_equal({}, validate_command(sigdict, ['tell'])) + assert_equal({}, validate_command(sigdict, ['tell', 'invalid'])) + for name in ('osd', 'mon', 'client', 'mds'): + assert_equal({}, validate_command(sigdict, ['tell', name])) + assert_equal({}, validate_command(sigdict, ['tell', + name + ".42"])) + self.assert_valid_command(['tell', name + ".42", 'something']) + self.assert_valid_command(['tell', name + ".42", + 'something', + 'something else']) + + +class TestMDS(TestArgparse): + + def test_stat(self): + self.check_no_arg('mds', 'stat') + + def test_compat_show(self): + self.assert_valid_command(['mds', 'compat', 'show']) + assert_equal({}, validate_command(sigdict, ['mds', 'compat'])) + assert_equal({}, validate_command(sigdict, ['mds', 'compat', + 'show', 'toomany'])) + + def test_set_state(self): + self.assert_valid_command(['mds', 'set_state', '1', '2']) + assert_equal({}, validate_command(sigdict, ['mds', 'set_state'])) + assert_equal({}, validate_command(sigdict, ['mds', 'set_state', '-1'])) + assert_equal({}, validate_command(sigdict, ['mds', 'set_state', + '1', '-1'])) + assert_equal({}, validate_command(sigdict, ['mds', 'set_state', + '1', '21'])) + + def test_fail(self): + self.check_1_string_arg('mds', 'fail') + + def test_rm(self): + # Valid: single GID argument present + self.assert_valid_command(['mds', 'rm', '1']) + + # Missing GID arg: invalid + assert_equal({}, validate_command(sigdict, ['mds', 'rm'])) + # Extra arg: invalid + assert_equal({}, validate_command(sigdict, ['mds', 'rm', '1', 'mds.42'])) + + def test_rmfailed(self): + 
self.assert_valid_command(['mds', 'rmfailed', '0']) + self.assert_valid_command(['mds', 'rmfailed', '0', '--yes-i-really-mean-it']) + assert_equal({}, validate_command(sigdict, ['mds', 'rmfailed', '0', + '--yes-i-really-mean-it', + 'toomany'])) + + def test_compat_rm_compat(self): + self.assert_valid_command(['mds', 'compat', 'rm_compat', '1']) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_compat'])) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_compat', '-1'])) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_compat', '1', '1'])) + + def test_incompat_rm_incompat(self): + self.assert_valid_command(['mds', 'compat', 'rm_incompat', '1']) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_incompat'])) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_incompat', '-1'])) + assert_equal({}, validate_command(sigdict, ['mds', + 'compat', + 'rm_incompat', '1', '1'])) + + +class TestFS(TestArgparse): + + def test_dump(self): + self.check_0_or_1_natural_arg('fs', 'dump') + + def test_fs_new(self): + self.assert_valid_command(['fs', 'new', 'default', 'metadata', 'data']) + + def test_fs_set_max_mds(self): + self.assert_valid_command(['fs', 'set', 'default', 'max_mds', '1']) + self.assert_valid_command(['fs', 'set', 'default', 'max_mds', '2']) + + def test_fs_set_cluster_down(self): + self.assert_valid_command(['fs', 'set', 'default', 'down', 'true']) + + def test_fs_set_cluster_up(self): + self.assert_valid_command(['fs', 'set', 'default', 'down', 'false']) + + def test_fs_set_cluster_joinable(self): + self.assert_valid_command(['fs', 'set', 'default', 'joinable', 'true']) + + def test_fs_set_cluster_not_joinable(self): + self.assert_valid_command(['fs', 'set', 'default', 'joinable', 'false']) + + def test_fs_set(self): + self.assert_valid_command(['fs', 'set', 'default', 'max_file_size', '2']) + self.assert_valid_command(['fs', 'set', 'default', 'allow_new_snaps', 
'no']) + assert_equal({}, validate_command(sigdict, ['fs', + 'set', + 'invalid'])) + + def test_fs_add_data_pool(self): + self.assert_valid_command(['fs', 'add_data_pool', 'default', '1']) + self.assert_valid_command(['fs', 'add_data_pool', 'default', 'foo']) + + def test_fs_remove_data_pool(self): + self.assert_valid_command(['fs', 'rm_data_pool', 'default', '1']) + self.assert_valid_command(['fs', 'rm_data_pool', 'default', 'foo']) + + def test_fs_rm(self): + self.assert_valid_command(['fs', 'rm', 'default']) + self.assert_valid_command(['fs', 'rm', 'default', '--yes-i-really-mean-it']) + assert_equal({}, validate_command(sigdict, ['fs', 'rm', 'default', '--yes-i-really-mean-it', 'toomany'])) + + def test_fs_ls(self): + self.assert_valid_command(['fs', 'ls']) + assert_equal({}, validate_command(sigdict, ['fs', 'ls', 'toomany'])) + + def test_fs_set_default(self): + self.assert_valid_command(['fs', 'set-default', 'cephfs']) + assert_equal({}, validate_command(sigdict, ['fs', 'set-default'])) + assert_equal({}, validate_command(sigdict, ['fs', 'set-default', 'cephfs', 'toomany'])) + + +class TestMon(TestArgparse): + + def test_dump(self): + self.check_0_or_1_natural_arg('mon', 'dump') + + def test_stat(self): + self.check_no_arg('mon', 'stat') + + def test_getmap(self): + self.check_0_or_1_natural_arg('mon', 'getmap') + + def test_add(self): + self.assert_valid_command(['mon', 'add', 'name', '1.2.3.4:1234']) + assert_equal({}, validate_command(sigdict, ['mon', 'add'])) + assert_equal({}, validate_command(sigdict, ['mon', 'add', 'name'])) + assert_equal({}, validate_command(sigdict, ['mon', 'add', + 'name', + '400.500.600.700'])) + assert_equal({}, validate_command(sigdict, ['mon', 'add', 'name', + '1.2.3.4:1234', + 'toomany'])) + + def test_remove(self): + self.assert_valid_command(['mon', 'remove', 'name']) + assert_equal({}, validate_command(sigdict, ['mon', 'remove'])) + assert_equal({}, validate_command(sigdict, ['mon', 'remove', + 'name', 'toomany'])) + + 
+class TestOSD(TestArgparse): + + def test_stat(self): + self.check_no_arg('osd', 'stat') + + def test_dump(self): + self.check_0_or_1_natural_arg('osd', 'dump') + + def test_osd_tree(self): + self.check_0_or_1_natural_arg('osd', 'tree') + + def test_osd_ls(self): + self.check_0_or_1_natural_arg('osd', 'ls') + + def test_osd_getmap(self): + self.check_0_or_1_natural_arg('osd', 'getmap') + + def test_osd_getcrushmap(self): + self.check_0_or_1_natural_arg('osd', 'getcrushmap') + + def test_perf(self): + self.check_no_arg('osd', 'perf') + + def test_getmaxosd(self): + self.check_no_arg('osd', 'getmaxosd') + + def test_find(self): + self.check_1_natural_arg('osd', 'find') + + def test_map(self): + self.assert_valid_command(['osd', 'map', 'poolname', 'objectname']) + self.assert_valid_command(['osd', 'map', 'poolname', 'objectname', 'nspace']) + assert_equal({}, validate_command(sigdict, ['osd', 'map'])) + assert_equal({}, validate_command(sigdict, ['osd', 'map', 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'map', + 'poolname', 'objectname', 'nspace', + 'toomany'])) + + def test_metadata(self): + self.check_0_or_1_natural_arg('osd', 'metadata') + + def test_scrub(self): + self.check_1_string_arg('osd', 'scrub') + + def test_deep_scrub(self): + self.check_1_string_arg('osd', 'deep-scrub') + + def test_repair(self): + self.check_1_string_arg('osd', 'repair') + + def test_lspools(self): + self.assert_valid_command(['osd', 'lspools']) + assert_equal({}, validate_command(sigdict, ['osd', 'lspools', + 'toomany'])) + + def test_blacklist_ls(self): + self.assert_valid_command(['osd', 'blacklist', 'ls']) + assert_equal({}, validate_command(sigdict, ['osd', 'blacklist'])) + assert_equal({}, validate_command(sigdict, ['osd', 'blacklist', + 'ls', 'toomany'])) + + def test_crush_rule(self): + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule'])) + for subcommand in ('list', 'ls'): + 
self.assert_valid_command(['osd', 'crush', 'rule', subcommand]) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rule', subcommand, + 'toomany'])) + + def test_crush_rule_dump(self): + self.assert_valid_command(['osd', 'crush', 'rule', 'dump']) + self.assert_valid_command(['osd', 'crush', 'rule', 'dump', 'RULE']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'dump', + 'RULE', + 'toomany'])) + + def test_crush_dump(self): + self.assert_valid_command(['osd', 'crush', 'dump']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'dump', + 'toomany'])) + + def test_setcrushmap(self): + self.check_no_arg('osd', 'setcrushmap') + + def test_crush_add_bucket(self): + self.assert_valid_command(['osd', 'crush', 'add-bucket', + 'name', 'type']) + self.assert_valid_command(['osd', 'crush', 'add-bucket', + 'name', 'type', 'root=foo-root', 'host=foo-host']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'add-bucket'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'add-bucket', '^^^', + 'type'])) + + def test_crush_rename_bucket(self): + self.assert_valid_command(['osd', 'crush', 'rename-bucket', + 'srcname', 'dstname']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', + 'srcname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', 'srcname', + 'dstname', + 'toomany'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', '^^^', + 'dstname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', 'srcname', + '^^^^'])) + + def check_crush_setter(self, setter): + self.assert_valid_command(['osd', 'crush', 
setter, + '*', '2.3', 'AZaz09-_.=']) + self.assert_valid_command(['osd', 'crush', setter, + 'osd.0', '2.3', 'AZaz09-_.=']) + self.assert_valid_command(['osd', 'crush', setter, + '0', '2.3', 'AZaz09-_.=']) + self.assert_valid_command(['osd', 'crush', setter, + '0', '2.3', 'AZaz09-_.=', 'AZaz09-_.=']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0'])) + ret = validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0', + '-1.0']) + assert ret in [None, {}] + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0', + '1.0', + '^^^'])) + + def test_crush_set(self): + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + self.check_crush_setter('set') + + def test_crush_add(self): + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + self.check_crush_setter('add') + + def test_crush_create_or_move(self): + assert_equal({}, validate_command(sigdict, ['osd', 'crush'])) + self.check_crush_setter('create-or-move') + + def test_crush_move(self): + self.assert_valid_command(['osd', 'crush', 'move', + 'AZaz09-_.', 'AZaz09-_.=']) + self.assert_valid_command(['osd', 'crush', 'move', + '0', 'AZaz09-_.=', 'AZaz09-_.=']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'move'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'move', 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'move', '^^^', + 'AZaz09-_.='])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'move', 'AZaz09-_.', + '^^^'])) + + def test_crush_link(self): + self.assert_valid_command(['osd', 'crush', 'link', + 'name', 'AZaz09-_.=']) + self.assert_valid_command(['osd', 'crush', 'link', + 'name', 'AZaz09-_.=', 'AZaz09-_.=']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'link'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'link', + 'name'])) + + def test_crush_rm(self): + for alias in ('rm', 'remove', 'unlink'): + 
self.assert_valid_command(['osd', 'crush', alias, 'AZaz09-_.']) + self.assert_valid_command(['osd', 'crush', alias, + 'AZaz09-_.', 'AZaz09-_.']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + alias])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + alias, + 'AZaz09-_.', + 'AZaz09-_.', + 'toomany'])) + + def test_crush_reweight(self): + self.assert_valid_command(['osd', 'crush', 'reweight', + 'AZaz09-_.', '2.3']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'reweight'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + 'AZaz09-_.', + '-1.0'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + '^^^', + '2.3'])) + + def test_crush_tunables(self): + for tunable in ('legacy', 'argonaut', 'bobtail', 'firefly', + 'optimal', 'default'): + self.assert_valid_command(['osd', 'crush', 'tunables', + tunable]) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'tunables'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'tunables', + 'default', 'toomany'])) + + def test_crush_rule_create_simple(self): + self.assert_valid_command(['osd', 'crush', 'rule', 'create-simple', + 'AZaz09-_.', 'AZaz09-_.', 'AZaz09-_.']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + '^^^', + 'AZaz09-_.', + 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + '|||', + 'AZaz09-_.'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 
'create-simple', + 'AZaz09-_.', + 'AZaz09-_.', + '+++'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + 'AZaz09-_.', + 'AZaz09-_.', + 'toomany'])) + + def test_crush_rule_create_erasure(self): + self.assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', + 'AZaz09-_.']) + self.assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', + 'AZaz09-_.', 'whatever']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure', + '^^^'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure', + 'name', '^^^'])) + + def test_crush_rule_rm(self): + self.assert_valid_command(['osd', 'crush', 'rule', 'rm', 'AZaz09-_.']) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm', + '^^^^'])) + assert_equal({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm', + 'AZaz09-_.', + 'toomany'])) + + def test_setmaxosd(self): + self.check_1_natural_arg('osd', 'setmaxosd') + + def test_pause(self): + self.check_no_arg('osd', 'pause') + + def test_unpause(self): + self.check_no_arg('osd', 'unpause') + + def test_erasure_code_profile_set(self): + self.assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name']) + self.assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name', 'A=B']) + self.assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name', 'A=B', 'C=D']) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'set'])) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'set', + '^^^^'])) + + def test_erasure_code_profile_get(self): + self.assert_valid_command(['osd', 'erasure-code-profile', 'get', + 'name']) + assert_equal({}, validate_command(sigdict, ['osd', + 
'erasure-code-profile', + 'get'])) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'get', + '^^^^'])) + + def test_erasure_code_profile_rm(self): + self.assert_valid_command(['osd', 'erasure-code-profile', 'rm', + 'name']) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'rm'])) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'rm', + '^^^^'])) + + def test_erasure_code_profile_ls(self): + self.assert_valid_command(['osd', 'erasure-code-profile', 'ls']) + assert_equal({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'ls', + 'toomany'])) + + def test_set_unset(self): + for action in ('set', 'unset'): + for flag in ('pause', 'noup', 'nodown', 'noout', 'noin', + 'nobackfill', 'norecover', 'noscrub', 'nodeep-scrub'): + self.assert_valid_command(['osd', action, flag]) + assert_equal({}, validate_command(sigdict, ['osd', action])) + assert_equal({}, validate_command(sigdict, ['osd', action, + 'invalid'])) + assert_equal({}, validate_command(sigdict, ['osd', action, + 'pause', 'toomany'])) + + def test_down(self): + self.check_1_or_more_string_args('osd', 'down') + + def test_out(self): + self.check_1_or_more_string_args('osd', 'out') + + def test_in(self): + self.check_1_or_more_string_args('osd', 'in') + + def test_rm(self): + self.check_1_or_more_string_args('osd', 'rm') + + def test_reweight(self): + self.assert_valid_command(['osd', 'reweight', '1', '0.1']) + assert_equal({}, validate_command(sigdict, ['osd', 'reweight'])) + assert_equal({}, validate_command(sigdict, ['osd', 'reweight', + '1'])) + assert_equal({}, validate_command(sigdict, ['osd', 'reweight', + '1', '2.0'])) + assert_equal({}, validate_command(sigdict, ['osd', 'reweight', + '-1', '0.1'])) + assert_equal({}, validate_command(sigdict, ['osd', 'reweight', + '1', '0.1', + 'toomany'])) + + def test_lost(self): + self.assert_valid_command(['osd', 'lost', '1', + '--yes-i-really-mean-it']) + 
self.assert_valid_command(['osd', 'lost', '1']) + assert_equal({}, validate_command(sigdict, ['osd', 'lost'])) + assert_equal({}, validate_command(sigdict, ['osd', 'lost', + '1', + 'what?'])) + assert_equal({}, validate_command(sigdict, ['osd', 'lost', + '-1', + '--yes-i-really-mean-it'])) + assert_equal({}, validate_command(sigdict, ['osd', 'lost', + '1', + '--yes-i-really-mean-it', + 'toomany'])) + + def test_create(self): + uuid = '12345678123456781234567812345678' + self.assert_valid_command(['osd', 'create']) + self.assert_valid_command(['osd', 'create', + uuid]) + assert_equal({}, validate_command(sigdict, ['osd', 'create', + 'invalid'])) + assert_equal({}, validate_command(sigdict, ['osd', 'create', + uuid, + 'toomany'])) + + def test_blacklist(self): + for action in ('add', 'rm'): + self.assert_valid_command(['osd', 'blacklist', action, + '1.2.3.4/567']) + self.assert_valid_command(['osd', 'blacklist', action, + '1.2.3.4']) + self.assert_valid_command(['osd', 'blacklist', action, + '1.2.3.4/567', '600.40']) + self.assert_valid_command(['osd', 'blacklist', action, + '1.2.3.4', '600.40']) + assert_equal({}, validate_command(sigdict, ['osd', 'blacklist', + action, + 'invalid', + '600.40'])) + assert_equal({}, validate_command(sigdict, ['osd', 'blacklist', + action, + '1.2.3.4/567', + '-1.0'])) + assert_equal({}, validate_command(sigdict, ['osd', 'blacklist', + action, + '1.2.3.4/567', + '600.40', + 'toomany'])) + + def test_pool_mksnap(self): + self.assert_valid_command(['osd', 'pool', 'mksnap', + 'poolname', 'snapname']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'mksnap'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'mksnap', + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'mksnap', + 'poolname', 'snapname', + 'toomany'])) + + def test_pool_rmsnap(self): + self.assert_valid_command(['osd', 'pool', 'rmsnap', + 'poolname', 'snapname']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 
'rmsnap'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', + 'poolname', 'snapname', + 'toomany'])) + + def test_pool_kwargs(self): + """ + Use the pool creation command to exercise keyword-style arguments + since it has lots of parameters + """ + # Simply use a keyword arg instead of a positional arg, in its + # normal order (pgp_num after pg_num) + assert_equal( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16 + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", "--pgp_num", "16"])) + + # Again, but using the "--foo=bar" style + assert_equal( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16 + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", "--pgp_num=16"])) + + # Specify keyword args in a different order than their definitions + # (pgp_num after pool_type) + assert_equal( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16, + "pool_type": "replicated" + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", + "--pool_type", "replicated", + "--pgp_num", "16"])) + + # Use a keyword argument that doesn't exist, should fail validation + assert_equal({}, validate_command(sigdict, + ['osd', 'pool', 'create', "foo", "8", "--foo=bar"])) + + def test_foo(self): + # Long form of a boolean argument (--foo=true) + assert_equal( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=true"])) + + def test_pool_bool_args(self): + """ + Use pool deletion to exercise boolean arguments since it has + the --yes-i-really-really-mean-it flags + """ + + # Short form of a boolean argument (--foo) + assert_equal( + { + "prefix": "osd pool delete", + "pool": "foo", + 
"pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it"])) + + # Long form of a boolean argument (--foo=true) + assert_equal( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=true"])) + + # Negative form of a boolean argument (--foo=false) + assert_equal( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": False + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=false"])) + + # Invalid value boolean argument (--foo=somethingelse) + assert_equal({}, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=rhubarb"])) + + def test_pool_create(self): + self.assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128']) + self.assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128']) + self.assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128', + 'replicated']) + self.assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128', + 'erasure', 'A-Za-z0-9-_.', 'ruleset^^']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', '-1'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', '128', + 'erasure', '^^^', + 'ruleset'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', '128', + 'erasure', 'profile', + 'ruleset', + 'toomany'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', 
'128', + 'INVALID', 'profile', + 'ruleset'])) + + def test_pool_delete(self): + self.assert_valid_command(['osd', 'pool', 'delete', + 'poolname', 'poolname', + '--yes-i-really-really-mean-it']) + self.assert_valid_command(['osd', 'pool', 'delete', + 'poolname', 'poolname']) + self.assert_valid_command(['osd', 'pool', 'delete', + 'poolname']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'delete'])) + assert_equal({}, validate_command(sigdict, + ['osd', 'pool', 'delete', + 'poolname', 'poolname', + '--yes-i-really-really-mean-it', + 'toomany'])) + + def test_pool_rename(self): + self.assert_valid_command(['osd', 'pool', 'rename', + 'poolname', 'othername']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'rename'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'rename', + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', 'rename', + 'poolname', 'othername', + 'toomany'])) + + def test_pool_get(self): + for var in ('size', 'min_size', + 'pg_num', 'pgp_num', 'crush_rule', 'fast_read', + 'scrub_min_interval', 'scrub_max_interval', + 'deep_scrub_interval', 'recovery_priority', + 'recovery_op_priority'): + self.assert_valid_command(['osd', 'pool', 'get', 'poolname', var]) + assert_equal({}, validate_command(sigdict, ['osd', 'pool'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'get'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname', + 'size', 'toomany'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname', + 'invalid'])) + + def test_pool_set(self): + for var in ('size', 'min_size', + 'pg_num', 'pgp_num', 'crush_rule', + 'hashpspool', 'fast_read', + 'scrub_min_interval', 'scrub_max_interval', + 'deep_scrub_interval', 'recovery_priority', + 'recovery_op_priority'): + self.assert_valid_command(['osd', 'pool', + 'set', 'poolname', var, 
'value']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set', 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set', 'poolname', + 'size', 'value', + 'toomany'])) + + def test_pool_set_quota(self): + for field in ('max_objects', 'max_bytes'): + self.assert_valid_command(['osd', 'pool', 'set-quota', + 'poolname', field, '10K']) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'max_objects'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'invalid', + '10K'])) + assert_equal({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'max_objects', + '10K', + 'toomany'])) + + def test_reweight_by_utilization(self): + self.assert_valid_command(['osd', 'reweight-by-utilization']) + self.assert_valid_command(['osd', 'reweight-by-utilization', '100']) + self.assert_valid_command(['osd', 'reweight-by-utilization', '100', '.1']) + assert_equal({}, validate_command(sigdict, ['osd', + 'reweight-by-utilization', + '100', + 'toomany'])) + + def test_tier_op(self): + for op in ('add', 'remove', 'set-overlay'): + self.assert_valid_command(['osd', 'tier', op, + 'poolname', 'othername']) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', op])) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', op, + 'poolname'])) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', op, + 'poolname', + 'othername', + 'toomany'])) + + def test_tier_cache_mode(self): + for mode in ('none', 'writeback', 'forward', 'readonly', 'readforward', 'readproxy'): + self.assert_valid_command(['osd', 'tier', 'cache-mode', + 'poolname', mode]) + assert_equal({}, validate_command(sigdict, ['osd', 
'tier', + 'cache-mode'])) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', + 'cache-mode', + 'invalid'])) + + def test_tier_remove_overlay(self): + self.assert_valid_command(['osd', 'tier', 'remove-overlay', + 'poolname']) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', + 'remove-overlay'])) + assert_equal({}, validate_command(sigdict, ['osd', 'tier', + 'remove-overlay', + 'poolname', + 'toomany'])) + + def set_ratio(self, command): + self.assert_valid_command(['osd', + command, + '0.0']) + assert_equal({}, validate_command(sigdict, ['osd', command])) + assert_equal({}, validate_command(sigdict, ['osd', + command, + '2.0'])) + + def test_set_full_ratio(self): + self.set_ratio('set-full-ratio') + + def test_set_backfillfull_ratio(self): + self.set_ratio('set-backfillfull-ratio') + + def test_set_nearfull_ratio(self): + self.set_ratio('set-nearfull-ratio') + + +class TestConfigKey(TestArgparse): + + def test_get(self): + self.check_1_string_arg('config-key', 'get') + + def test_put(self): + self.assert_valid_command(['config-key', 'put', + 'key']) + self.assert_valid_command(['config-key', 'put', + 'key', 'value']) + assert_equal({}, validate_command(sigdict, ['config-key', 'put'])) + assert_equal({}, validate_command(sigdict, ['config-key', 'put', + 'key', 'value', + 'toomany'])) + + def test_del(self): + self.check_1_string_arg('config-key', 'del') + + def test_exists(self): + self.check_1_string_arg('config-key', 'exists') + + def test_dump(self): + self.check_0_or_1_string_arg('config-key', 'dump') + + def test_list(self): + self.check_no_arg('config-key', 'list') + + +class TestValidate(TestCase): + + ARGS = 0 + KWARGS = 1 + KWARGS_EQ = 2 + MIXED = 3 + + def setUp(self): + self.prefix = ['some', 'random', 'cmd'] + self.args_dict = [ + {'name': 'variable_one', 'type': 'CephString'}, + {'name': 'variable_two', 'type': 'CephString'}, + {'name': 'variable_three', 'type': 'CephString'}, + {'name': 'variable_four', 'type': 'CephInt'}, + 
{'name': 'variable_five', 'type': 'CephString'}] + self.args = [] + for d in self.args_dict: + if d['type'] == 'CephInt': + val = "{}".format(random.randint(0, 100)) + elif d['type'] == 'CephString': + letters = string.ascii_letters + str_len = random.randint(5, 10) + val = ''.join(random.choice(letters) for _ in range(str_len)) + else: + self.skipTest() + + self.args.append((d['name'], val)) + + self.sig = parse_funcsig(self.prefix + self.args_dict) + + @nottest + def arg_kwarg_test(self, prefix, args, sig, arg_type=0): + """ + Runs validate in different arg/kargs ways. + + :param prefix: List of prefix commands (that can't be kwarged) + :param args: a list of kwarg, arg pairs: [(k1, v1), (k2, v2), ...] + :param sig: The sig to match + :param arg_type: how to build the args to send. As positional args (ARGS), + as long kwargs (KWARGS [--k v]), other style long kwargs + (KWARGS_EQ (--k=v]), and mixed (MIXED) where there will be + a random mix of the above. + :return: None, the method will assert. + """ + final_args = list(prefix) + for k, v in args: + a_type = arg_type + if a_type == self.MIXED: + a_type = random.choice((self.ARGS, + self.KWARGS, + self.KWARGS_EQ)) + if a_type == self.ARGS: + final_args.append(v) + elif a_type == self.KWARGS: + final_args.extend(["--{}".format(k), v]) + else: + final_args.append("--{}={}".format(k, v)) + + try: + validate(final_args, sig) + except (ArgumentError, ArgumentMissing, + ArgumentNumber, ArgumentTooFew, ArgumentValid) as ex: + self.fail("Validation failed: {}".format(str(ex))) + + def test_args_and_kwargs_validate(self): + for arg_type in (self.ARGS, self.KWARGS, self.KWARGS_EQ, self.MIXED): + self.arg_kwarg_test(self.prefix, self.args, self.sig, arg_type) + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && +# PYTHONPATH=pybind nosetests --stop \ +# test/pybind/test_ceph_argparse.py # test_ceph_argparse.py:TestOSD.test_rm" +# End: diff --git a/src/test/pybind/test_ceph_daemon.py b/src/test/pybind/test_ceph_daemon.py new file mode 100755 index 00000000..64a0ea39 --- /dev/null +++ b/src/test/pybind/test_ceph_daemon.py @@ -0,0 +1,47 @@ +#!/usr/bin/env nosetests +# -*- mode:python; tab-width:4; indent-tabs-mode:t -*- +# vim: ts=4 sw=4 smarttab expandtab +# +""" +Copyright (C) 2015 Red Hat + +This is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License version 2, as published by the Free Software +Foundation. See file COPYING. +""" + +from unittest import TestCase + +from ceph_daemon import DaemonWatcher + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + + +class TestDaemonWatcher(TestCase): + def test_format(self): + dw = DaemonWatcher(None) + + self.assertEqual(dw.format_dimless(1, 4), " 1 ") + self.assertEqual(dw.format_dimless(1000, 4), "1.0k") + self.assertEqual(dw.format_dimless(3.14159, 4), " 3 ") + self.assertEqual(dw.format_dimless(1400000, 4), "1.4M") + + def test_col_width(self): + dw = DaemonWatcher(None) + + self.assertEqual(dw.col_width("foo"), 4) + self.assertEqual(dw.col_width("foobar"), 6) + + def test_supports_color(self): + dw = DaemonWatcher(None) + # Can't count on having a tty available during tests, so only test the false case + self.assertEqual(dw.supports_color(StringIO()), False) +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && +# PYTHONPATH=pybind nosetests --stop \ +# test/pybind/test_ceph_daemon.py +# End: diff --git a/src/test/pybind/test_cephfs.py b/src/test/pybind/test_cephfs.py new file mode 100644 index 00000000..93aa623e --- /dev/null +++ b/src/test/pybind/test_cephfs.py @@ -0,0 +1,434 @@ +# vim: expandtab smarttab shiftwidth=4 softtabstop=4 +from nose.tools import assert_raises, assert_equal, assert_greater, with_setup +import cephfs as libcephfs +import fcntl +import os +import time +from datetime import datetime + +cephfs = None + +def setup_module(): + global cephfs + cephfs = libcephfs.LibCephFS(conffile='') + cephfs.mount() + +def teardown_module(): + global cephfs + cephfs.shutdown() + +def setup_test(): + d = cephfs.opendir(b"/") + dent = cephfs.readdir(d) + while dent: + if (dent.d_name not in [b".", b".."]): + if dent.is_dir(): + cephfs.rmdir(b"/" + dent.d_name) + else: + cephfs.unlink(b"/" + dent.d_name) + + dent = cephfs.readdir(d) + + cephfs.closedir(d) + + cephfs.chdir(b"/") + +@with_setup(setup_test) +def test_conf_get(): + fsid = cephfs.conf_get("fsid") + assert(len(fsid) > 0) + +@with_setup(setup_test) +def test_version(): + cephfs.version() + +@with_setup(setup_test) +def test_fstat(): + fd = cephfs.open(b'file-1', 'w', 0o755) + stat = cephfs.fstat(fd) + assert(len(stat) == 13) + cephfs.close(fd) + +@with_setup(setup_test) +def test_statfs(): + stat = cephfs.statfs(b'/') + assert(len(stat) == 11) + +@with_setup(setup_test) +def test_statx(): + stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_MODE, 0) + assert('mode' in stat.keys()) + stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_BTIME, 0) + assert('btime' in stat.keys()) + + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'file-1', b'file-2') + stat = cephfs.statx(b'file-2', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_BTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + assert('mode' in stat.keys()) + assert('btime' in stat.keys()) + 
cephfs.unlink(b'file-2') + cephfs.unlink(b'file-1') + +@with_setup(setup_test) +def test_syncfs(): + stat = cephfs.sync_fs() + +@with_setup(setup_test) +def test_fsync(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"asdf", 0) + stat = cephfs.fsync(fd, 0) + cephfs.write(fd, b"qwer", 0) + stat = cephfs.fsync(fd, 1) + cephfs.close(fd) + #sync on non-existing fd (assume fd 12345 is not exists) + assert_raises(libcephfs.Error, cephfs.fsync, 12345, 0) + +@with_setup(setup_test) +def test_directory(): + cephfs.mkdir(b"/temp-directory", 0o755) + cephfs.mkdirs(b"/temp-directory/foo/bar", 0o755) + cephfs.chdir(b"/temp-directory") + assert_equal(cephfs.getcwd(), b"/temp-directory") + cephfs.rmdir(b"/temp-directory/foo/bar") + cephfs.rmdir(b"/temp-directory/foo") + cephfs.rmdir(b"/temp-directory") + assert_raises(libcephfs.ObjectNotFound, cephfs.chdir, b"/temp-directory") + +@with_setup(setup_test) +def test_walk_dir(): + cephfs.chdir(b"/") + dirs = [b"dir-1", b"dir-2", b"dir-3"] + for i in dirs: + cephfs.mkdir(i, 0o755) + handler = cephfs.opendir(b"/") + d = cephfs.readdir(handler) + dirs += [b".", b".."] + while d: + assert(d.d_name in dirs) + dirs.remove(d.d_name) + d = cephfs.readdir(handler) + assert(len(dirs) == 0) + dirs = [b"/dir-1", b"/dir-2", b"/dir-3"] + for i in dirs: + cephfs.rmdir(i) + cephfs.closedir(handler) + +@with_setup(setup_test) +def test_xattr(): + assert_raises(libcephfs.OperationNotSupported, cephfs.setxattr, "/", "key", b"value", 0) + cephfs.setxattr("/", "user.key", b"value", 0) + assert_equal(b"value", cephfs.getxattr("/", "user.key")) + + cephfs.setxattr("/", "user.big", b"x" * 300, 0) + + # Default size is 255, get ERANGE + assert_raises(libcephfs.OutOfRange, cephfs.getxattr, "/", "user.big") + + # Pass explicit size, and we'll get the value + assert_equal(300, len(cephfs.getxattr("/", "user.big", 300))) + + +@with_setup(setup_test) +def test_rename(): + cephfs.mkdir(b"/a", 0o755) + cephfs.mkdir(b"/a/b", 0o755) + 
cephfs.rename(b"/a", b"/b") + cephfs.stat(b"/b/b") + cephfs.rmdir(b"/b/b") + cephfs.rmdir(b"/b") + +@with_setup(setup_test) +def test_open(): + assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r') + assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r+') + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"asdf", 0) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"asdf") + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r+', 0o755) + cephfs.write(fd, b"zxcv", 4) + assert_equal(cephfs.read(fd, 4, 8), b"zxcv") + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'w+', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"") + cephfs.write(fd, b"zxcv", 4) + assert_equal(cephfs.read(fd, 4, 8), b"zxcv") + cephfs.close(fd) + fd = cephfs.open(b'file-1', os.O_RDWR, 0o755) + cephfs.write(fd, b"asdf", 0) + assert_equal(cephfs.read(fd, 0, 4), b"asdf") + cephfs.close(fd) + assert_raises(libcephfs.OperationNotSupported, cephfs.open, b'file-1', 'a') + cephfs.unlink(b'file-1') + +@with_setup(setup_test) +def test_link(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.link(b'file-1', b'file-2') + fd = cephfs.open(b'file-2', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"1111") + cephfs.close(fd) + fd = cephfs.open(b'file-2', 'r+', 0o755) + cephfs.write(fd, b"2222", 4) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 8), b"11112222") + cephfs.close(fd) + cephfs.unlink(b'file-2') + +@with_setup(setup_test) +def test_symlink(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'file-1', b'file-2') + fd = cephfs.open(b'file-2', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"1111") + cephfs.close(fd) + fd = cephfs.open(b'file-2', 'r+', 0o755) + cephfs.write(fd, b"2222", 4) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + 
assert_equal(cephfs.read(fd, 0, 8), b"11112222") + cephfs.close(fd) + cephfs.unlink(b'file-2') + +@with_setup(setup_test) +def test_readlink(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'/file-1', b'/file-2') + d = cephfs.readlink(b"/file-2",100) + assert_equal(d, b"/file-1") + cephfs.unlink(b'/file-2') + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_delete_cwd(): + assert_equal(b"/", cephfs.getcwd()) + + cephfs.mkdir(b"/temp-directory", 0o755) + cephfs.chdir(b"/temp-directory") + cephfs.rmdir(b"/temp-directory") + + # getcwd gives you something stale here: it remembers the path string + # even when things are unlinked. It's up to the caller to find out + # whether it really still exists + assert_equal(b"/temp-directory", cephfs.getcwd()) + +@with_setup(setup_test) +def test_flock(): + fd = cephfs.open(b'file-1', 'w', 0o755) + + cephfs.flock(fd, fcntl.LOCK_EX, 123); + fd2 = cephfs.open(b'file-1', 'w', 0o755) + + assert_raises(libcephfs.WouldBlock, cephfs.flock, fd2, + fcntl.LOCK_EX | fcntl.LOCK_NB, 456); + cephfs.close(fd2) + + cephfs.close(fd) + +@with_setup(setup_test) +def test_mount_unmount(): + test_directory() + cephfs.unmount() + cephfs.mount() + test_open() + +@with_setup(setup_test) +def test_mount_root(): + cephfs.mkdir(b"/mount-directory", 0o755) + cephfs.unmount() + cephfs.mount(mount_root = b"/mount-directory") + + assert_raises(libcephfs.Error, cephfs.mount, mount_root = b"/nowhere") + cephfs.unmount() + cephfs.mount() + +@with_setup(setup_test) +def test_utime(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.utime(b'/file-1') + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + 
assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) + mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) + + cephfs.utime(b'/file-1', (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futime(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futime(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) + mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) + + cephfs.futime(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_utimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.utimes(b'/file-1') + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.utimes(b'/file-1', 
(atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_lutimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + cephfs.symlink(b'/file-1', b'/file-2') + + stx_pre_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + stx_pre_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + time.sleep(1) + cephfs.lutimes(b'/file-2') + + stx_post_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + assert_equal(stx_post_t['atime'], stx_pre_t['atime']) + assert_equal(stx_post_t['mtime'], stx_pre_t['mtime']) + + assert_greater(stx_post_s['atime'], stx_pre_s['atime']) + assert_greater(stx_post_s['mtime'], stx_pre_s['mtime']) + + atime_pre = time.mktime(stx_pre_s['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre_s['mtime'].timetuple()) + + cephfs.lutimes(b'/file-2', (atime_pre, mtime_pre)) + stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + assert_equal(stx_post_s['atime'], stx_pre_s['atime']) + assert_equal(stx_post_s['mtime'], stx_pre_s['mtime']) + + cephfs.unlink(b'/file-2') + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futimes(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | 
libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.futimes(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futimens(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futimens(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.futimens(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_disk_quota_exceeeded_error(): + cephfs.mkdir("/dir-1", 0o755) + cephfs.setxattr("/dir-1", "ceph.quota.max_bytes", b"5", 0) + fd = cephfs.open(b'/dir-1/file-1', 'w', 0o755) + assert_raises(libcephfs.DiskQuotaExceeded, cephfs.write, fd, b"abcdeghiklmnopqrstuvwxyz", 0) + cephfs.close(fd) + cephfs.unlink(b"/dir-1/file-1") diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py new file mode 100644 index 00000000..c4ed1d24 --- /dev/null +++ b/src/test/pybind/test_rados.py @@ -0,0 +1,1114 @@ +from 
__future__ import print_function +from nose import SkipTest +from nose.tools import eq_ as eq, ok_ as ok, assert_raises +from rados import (Rados, Error, RadosStateError, Object, ObjectExists, + ObjectNotFound, ObjectBusy, requires, opt, + LIBRADOS_ALL_NSPACES, WriteOpCtx, ReadOpCtx, + LIBRADOS_SNAP_HEAD, LIBRADOS_OPERATION_BALANCE_READS, LIBRADOS_OPERATION_SKIPRWLOCKS, MonitorLog) +import time +import threading +import json +import errno +import os +import re +import sys + +# Are we running Python 2.x +_python2 = sys.version_info[0] < 3 + +def test_rados_init_error(): + assert_raises(Error, Rados, conffile='', rados_id='admin', + name='client.admin') + assert_raises(Error, Rados, conffile='', name='invalid') + assert_raises(Error, Rados, conffile='', name='bad.invalid') + +def test_rados_init(): + with Rados(conffile='', rados_id='admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + +def test_ioctx_context_manager(): + with Rados(conffile='', rados_id='admin') as conn: + with conn.open_ioctx('rbd') as ioctx: + pass + +def test_parse_argv(): + args = ['osd', 'pool', 'delete', 'foobar', 'foobar', '--yes-i-really-really-mean-it'] + r = Rados() + eq(args, r.conf_parse_argv(args)) + +def test_parse_argv_empty_str(): + args = [''] + r = Rados() + eq(args, r.conf_parse_argv(args)) + +class TestRequires(object): + @requires(('foo', str), ('bar', int), ('baz', int)) + def _method_plain(self, foo, bar, baz): + ok(isinstance(foo, str)) + ok(isinstance(bar, int)) + ok(isinstance(baz, int)) + return (foo, bar, baz) + + def test_method_plain(self): + assert_raises(TypeError, self._method_plain, 42, 42, 42) + assert_raises(TypeError, self._method_plain, '42', '42', '42') + assert_raises(TypeError, self._method_plain, foo='42', bar='42', baz='42') + eq(self._method_plain('42', 42, 42), ('42', 42, 42)) + eq(self._method_plain(foo='42', bar=42, baz=42), 
('42', 42, 42)) + + @requires(('opt_foo', opt(str)), ('opt_bar', opt(int)), ('baz', int)) + def _method_with_opt_arg(self, foo, bar, baz): + ok(isinstance(foo, str) or foo is None) + ok(isinstance(bar, int) or bar is None) + ok(isinstance(baz, int)) + return (foo, bar, baz) + + def test_method_with_opt_args(self): + assert_raises(TypeError, self._method_with_opt_arg, 42, 42, 42) + assert_raises(TypeError, self._method_with_opt_arg, '42', '42', 42) + assert_raises(TypeError, self._method_with_opt_arg, None, None, None) + eq(self._method_with_opt_arg(None, 42, 42), (None, 42, 42)) + eq(self._method_with_opt_arg('42', None, 42), ('42', None, 42)) + eq(self._method_with_opt_arg(None, None, 42), (None, None, 42)) + + +class TestRadosStateError(object): + def _requires_configuring(self, rados): + assert_raises(RadosStateError, rados.connect) + + def _requires_configuring_or_connected(self, rados): + assert_raises(RadosStateError, rados.conf_read_file) + assert_raises(RadosStateError, rados.conf_parse_argv, None) + assert_raises(RadosStateError, rados.conf_parse_env) + assert_raises(RadosStateError, rados.conf_get, 'opt') + assert_raises(RadosStateError, rados.conf_set, 'opt', 'val') + assert_raises(RadosStateError, rados.ping_monitor, 0) + + def _requires_connected(self, rados): + assert_raises(RadosStateError, rados.pool_exists, 'foo') + assert_raises(RadosStateError, rados.pool_lookup, 'foo') + assert_raises(RadosStateError, rados.pool_reverse_lookup, 0) + assert_raises(RadosStateError, rados.create_pool, 'foo') + assert_raises(RadosStateError, rados.get_pool_base_tier, 0) + assert_raises(RadosStateError, rados.delete_pool, 'foo') + assert_raises(RadosStateError, rados.list_pools) + assert_raises(RadosStateError, rados.get_fsid) + assert_raises(RadosStateError, rados.open_ioctx, 'foo') + assert_raises(RadosStateError, rados.mon_command, '', b'') + assert_raises(RadosStateError, rados.osd_command, 0, '', b'') + assert_raises(RadosStateError, rados.pg_command, '', '', 
b'') + assert_raises(RadosStateError, rados.wait_for_latest_osdmap) + assert_raises(RadosStateError, rados.blacklist_add, '127.0.0.1/123', 0) + + def test_configuring(self): + rados = Rados(conffile='') + eq('configuring', rados.state) + self._requires_connected(rados) + + def test_connected(self): + rados = Rados(conffile='') + with rados: + eq('connected', rados.state) + self._requires_configuring(rados) + + def test_shutdown(self): + rados = Rados(conffile='') + with rados: + pass + eq('shutdown', rados.state) + self._requires_configuring(rados) + self._requires_configuring_or_connected(rados) + self._requires_connected(rados) + + +class TestRados(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.conf_parse_env('FOO_DOES_NOT_EXIST_BLAHBLAH') + self.rados.conf_parse_env() + self.rados.connect() + + # Assume any pre-existing pools are the cluster's defaults + self.default_pools = self.rados.list_pools() + + def tearDown(self): + self.rados.shutdown() + + def test_ping_monitor(self): + assert_raises(ObjectNotFound, self.rados.ping_monitor, 'not_exists_monitor') + cmd = {'prefix': 'mon dump', 'format':'json'} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') + for mon in json.loads(buf.decode('utf8'))['mons']: + while True: + output = self.rados.ping_monitor(mon['name']) + if output is None: + continue + buf = json.loads(output) + if buf.get('health'): + break + + def test_create(self): + self.rados.create_pool('foo') + self.rados.delete_pool('foo') + + def test_create_utf8(self): + if _python2: + # Use encoded bytestring + poolname = b"\351\273\204" + else: + poolname = "\u9ec4" + self.rados.create_pool(poolname) + assert self.rados.pool_exists(u"\u9ec4") + self.rados.delete_pool(poolname) + + def test_pool_lookup_utf8(self): + if _python2: + poolname = u'\u9ec4' + else: + poolname = '\u9ec4' + self.rados.create_pool(poolname) + try: + poolid = self.rados.pool_lookup(poolname) + eq(poolname, 
self.rados.pool_reverse_lookup(poolid)) + finally: + self.rados.delete_pool(poolname) + + def test_eexist(self): + self.rados.create_pool('foo') + assert_raises(ObjectExists, self.rados.create_pool, 'foo') + self.rados.delete_pool('foo') + + def list_non_default_pools(self): + pools = self.rados.list_pools() + for p in self.default_pools: + pools.remove(p) + return set(pools) + + def test_list_pools(self): + eq(set(), self.list_non_default_pools()) + self.rados.create_pool('foo') + eq(set(['foo']), self.list_non_default_pools()) + self.rados.create_pool('bar') + eq(set(['foo', 'bar']), self.list_non_default_pools()) + self.rados.create_pool('baz') + eq(set(['foo', 'bar', 'baz']), self.list_non_default_pools()) + self.rados.delete_pool('foo') + eq(set(['bar', 'baz']), self.list_non_default_pools()) + self.rados.delete_pool('baz') + eq(set(['bar']), self.list_non_default_pools()) + self.rados.delete_pool('bar') + eq(set(), self.list_non_default_pools()) + self.rados.create_pool('a' * 500) + eq(set(['a' * 500]), self.list_non_default_pools()) + self.rados.delete_pool('a' * 500) + + def test_get_pool_base_tier(self): + self.rados.create_pool('foo') + try: + self.rados.create_pool('foo-cache') + try: + pool_id = self.rados.pool_lookup('foo') + tier_pool_id = self.rados.pool_lookup('foo-cache') + + cmd = {"prefix":"osd tier add", "pool":"foo", "tierpool":"foo-cache", "force_nonempty":""} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + + try: + cmd = {"prefix":"osd tier cache-mode", "pool":"foo-cache", "tierpool":"foo-cache", "mode":"readonly", "yes_i_really_mean_it": True} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + + eq(self.rados.wait_for_latest_osdmap(), 0) + + eq(pool_id, self.rados.get_pool_base_tier(pool_id)) + eq(pool_id, self.rados.get_pool_base_tier(tier_pool_id)) + finally: + cmd = {"prefix":"osd tier remove", "pool":"foo", "tierpool":"foo-cache"} + ret, buf, errs = 
self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + finally: + self.rados.delete_pool('foo-cache') + finally: + self.rados.delete_pool('foo') + + def test_get_fsid(self): + fsid = self.rados.get_fsid() + assert re.match('[0-9a-f\-]{36}', fsid, re.I) + + def test_blacklist_add(self): + self.rados.blacklist_add("1.2.3.4/123", 1) + + def test_get_cluster_stats(self): + stats = self.rados.get_cluster_stats() + assert stats['kb'] > 0 + assert stats['kb_avail'] > 0 + assert stats['kb_used'] > 0 + assert stats['num_objects'] >= 0 + + def test_monitor_log(self): + lock = threading.Condition() + def cb(arg, line, who, sec, nsec, seq, level, msg): + # NOTE(sileht): the old pyrados API was received the pointer as int + # instead of the value of arg + eq(arg, "arg") + with lock: + lock.notify() + return 0 + + # NOTE(sileht): force don't save the monitor into local var + # to ensure all references are correctly tracked into the lib + MonitorLog(self.rados, "debug", cb, "arg") + with lock: + lock.wait() + MonitorLog(self.rados, "debug", None, None) + eq(None, self.rados.monitor_callback) + +class TestIoctx(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + + def tearDown(self): + cmd = {"prefix":"osd unset", "key":"noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test_get_last_version(self): + version = self.ioctx.get_last_version() + assert version >= 0 + + def test_get_stats(self): + stats = self.ioctx.get_stats() + eq(stats, {'num_objects_unfound': 0, + 'num_objects_missing_on_primary': 0, + 'num_object_clones': 0, + 'num_objects': 0, + 'num_object_copies': 0, + 'num_bytes': 0, + 'num_rd_kb': 0, + 'num_wr_kb': 0, + 'num_kb': 0, + 'num_wr': 0, + 'num_objects_degraded': 0, + 'num_rd': 0}) + + def 
test_write(self): + self.ioctx.write('abc', b'abc') + eq(self.ioctx.read('abc'), b'abc') + + def test_write_full(self): + self.ioctx.write('abc', b'abc') + eq(self.ioctx.read('abc'), b'abc') + self.ioctx.write_full('abc', b'd') + eq(self.ioctx.read('abc'), b'd') + + def test_append(self): + self.ioctx.write('abc', b'a') + self.ioctx.append('abc', b'b') + self.ioctx.append('abc', b'c') + eq(self.ioctx.read('abc'), b'abc') + + def test_write_zeros(self): + self.ioctx.write('abc', b'a\0b\0c') + eq(self.ioctx.read('abc'), b'a\0b\0c') + + def test_trunc(self): + self.ioctx.write('abc', b'abc') + self.ioctx.trunc('abc', 2) + eq(self.ioctx.read('abc'), b'ab') + size = self.ioctx.stat('abc')[0] + eq(size, 2) + + def test_list_objects_empty(self): + eq(list(self.ioctx.list_objects()), []) + + def test_list_objects(self): + self.ioctx.write('a', b'') + self.ioctx.write('b', b'foo') + self.ioctx.write_full('c', b'bar') + self.ioctx.append('d', b'jazz') + object_names = [obj.key for obj in self.ioctx.list_objects()] + eq(sorted(object_names), ['a', 'b', 'c', 'd']) + + def test_list_ns_objects(self): + self.ioctx.write('a', b'') + self.ioctx.write('b', b'foo') + self.ioctx.write_full('c', b'bar') + self.ioctx.append('d', b'jazz') + self.ioctx.set_namespace("ns1") + self.ioctx.write('ns1-a', b'') + self.ioctx.write('ns1-b', b'foo') + self.ioctx.write_full('ns1-c', b'bar') + self.ioctx.append('ns1-d', b'jazz') + self.ioctx.append('d', b'jazz') + self.ioctx.set_namespace(LIBRADOS_ALL_NSPACES) + object_names = [(obj.nspace, obj.key) for obj in self.ioctx.list_objects()] + eq(sorted(object_names), [('', 'a'), ('','b'), ('','c'), ('','d'),\ + ('ns1', 'd'), ('ns1', 'ns1-a'), ('ns1', 'ns1-b'),\ + ('ns1', 'ns1-c'), ('ns1', 'ns1-d')]) + + def test_xattrs(self): + xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f='') + self.ioctx.write('abc', b'') + for key, value in xattrs.items(): + self.ioctx.set_xattr('abc', key, value) + eq(self.ioctx.get_xattr('abc', key), value) + 
stored_xattrs = {} + for key, value in self.ioctx.get_xattrs('abc'): + stored_xattrs[key] = value + eq(stored_xattrs, xattrs) + + def test_obj_xattrs(self): + xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f='') + self.ioctx.write('abc', b'') + obj = list(self.ioctx.list_objects())[0] + for key, value in xattrs.items(): + obj.set_xattr(key, value) + eq(obj.get_xattr(key), value) + stored_xattrs = {} + for key, value in obj.get_xattrs(): + stored_xattrs[key] = value + eq(stored_xattrs, xattrs) + + def test_create_snap(self): + assert_raises(ObjectNotFound, self.ioctx.remove_snap, 'foo') + self.ioctx.create_snap('foo') + self.ioctx.remove_snap('foo') + + def test_list_snaps_empty(self): + eq(list(self.ioctx.list_snaps()), []) + + def test_list_snaps(self): + snaps = ['snap1', 'snap2', 'snap3'] + for snap in snaps: + self.ioctx.create_snap(snap) + listed_snaps = [snap.name for snap in self.ioctx.list_snaps()] + eq(snaps, listed_snaps) + + def test_lookup_snap(self): + self.ioctx.create_snap('foo') + snap = self.ioctx.lookup_snap('foo') + eq(snap.name, 'foo') + + def test_snap_timestamp(self): + self.ioctx.create_snap('foo') + snap = self.ioctx.lookup_snap('foo') + snap.get_timestamp() + + def test_remove_snap(self): + self.ioctx.create_snap('foo') + (snap,) = self.ioctx.list_snaps() + eq(snap.name, 'foo') + self.ioctx.remove_snap('foo') + eq(list(self.ioctx.list_snaps()), []) + + def test_snap_rollback(self): + self.ioctx.write("insnap", b"contents1") + self.ioctx.create_snap("snap1") + self.ioctx.remove_object("insnap") + self.ioctx.snap_rollback("insnap", "snap1") + eq(self.ioctx.read("insnap"), b"contents1") + self.ioctx.remove_snap("snap1") + self.ioctx.remove_object("insnap") + + def test_snap_read(self): + self.ioctx.write("insnap", b"contents1") + self.ioctx.create_snap("snap1") + self.ioctx.remove_object("insnap") + snap = self.ioctx.lookup_snap("snap1") + self.ioctx.set_read(snap.snap_id) + eq(self.ioctx.read("insnap"), b"contents1") + 
self.ioctx.set_read(LIBRADOS_SNAP_HEAD) + self.ioctx.write("inhead", b"contents2") + eq(self.ioctx.read("inhead"), b"contents2") + self.ioctx.remove_snap("snap1") + self.ioctx.remove_object("inhead") + + def test_set_omap(self): + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + write_op.set_flags(LIBRADOS_OPERATION_SKIPRWLOCKS) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 4) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + next(iter) + eq(list(iter), [("2", b"bbb"), ("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "2", "", 4) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(("3", b"ccc"), next(iter)) + eq(list(iter), [("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "2", 4) + eq(ret, 0) + read_op.set_flags(LIBRADOS_OPERATION_BALANCE_READS) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("2", b"bbb")]) + + def test_set_omap_aio(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + comp = self.ioctx.operate_aio_write_op(write_op, "hw", cb, cb) + comp.wait_for_complete() + comp.wait_for_safe() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 4) + eq(ret, 0) + comp = self.ioctx.operate_aio_read_op(read_op, "hw", cb, cb) + comp.wait_for_complete() + comp.wait_for_safe() + with lock: + while count[0] < 4: + lock.wait() + 
eq(comp.get_return_value(), 0) + next(iter) + eq(list(iter), [("2", b"bbb"), ("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + + def test_write_ops(self): + with WriteOpCtx() as write_op: + write_op.new(0) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'') + write_op.write_full(b'1') + write_op.append(b'2') + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12') + write_op.write_full(b'12345') + write_op.write(b'x', 2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12x45') + write_op.write_full(b'12345') + write_op.zero(2, 2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12\x00\x005') + write_op.write_full(b'12345') + write_op.truncate(2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12') + write_op.remove() + self.ioctx.operate_write_op(write_op, "write_ops") + with assert_raises(ObjectNotFound): + self.ioctx.read('write_ops') + + def test_get_omap_vals_by_keys(self): + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",)) + eq(ret, 0) + with assert_raises(ObjectNotFound): + self.ioctx.operate_read_op(read_op, "no_such") + + def test_get_omap_keys(self): + keys = ("1", "2", "3") + values = (b"aaa", b"bbb", b"ccc") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = 
self.ioctx.get_omap_keys(read_op,"",2) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("1", None), ("2", None)]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_keys(read_op,"",2) + eq(ret, 0) + with assert_raises(ObjectNotFound): + self.ioctx.operate_read_op(read_op, "no_such") + + def test_clear_omap(self): + keys = ("1", "2", "3") + values = (b"aaa", b"bbb", b"ccc") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with WriteOpCtx() as write_op_1: + self.ioctx.clear_omap(write_op_1) + self.ioctx.operate_write_op(write_op_1, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("1",)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), []) + + def test_locator(self): + self.ioctx.set_locator_key("bar") + self.ioctx.write('foo', b'contents1') + objects = [i for i in self.ioctx.list_objects()] + eq(len(objects), 1) + eq(self.ioctx.get_locator_key(), "bar") + self.ioctx.set_locator_key("") + objects[0].seek(0) + objects[0].write(b"contents2") + eq(self.ioctx.get_locator_key(), "") + self.ioctx.set_locator_key("bar") + contents = self.ioctx.read("foo") + eq(contents, b"contents2") + eq(self.ioctx.get_locator_key(), "bar") + objects[0].remove() + objects = [i for i in self.ioctx.list_objects()] + eq(objects, []) + self.ioctx.set_locator_key("") + + def test_aio_write(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + comp.wait_for_complete() + comp.wait_for_safe() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_write_no_comp_ref(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with 
lock: + count[0] += 1 + lock.notify() + return 0 + # NOTE(sileht): force don't save the comp into local var + # to ensure all references are correctly tracked into the lib + self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + with lock: + while count[0] < 2: + lock.wait() + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_append(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + comp2 = self.ioctx.aio_append("foo", b"baz", cb, cb) + comp.wait_for_complete() + contents = self.ioctx.read("foo") + eq(contents, b"barbaz") + with lock: + while count[0] < 4: + lock.wait() + eq(comp.get_return_value(), 0) + eq(comp2.get_return_value(), 0) + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_write_full(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + self.ioctx.aio_write("foo", b"barbaz", 0, cb, cb) + comp = self.ioctx.aio_write_full("foo", b"bar", cb, cb) + comp.wait_for_complete() + comp.wait_for_safe() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_stat(self): + lock = threading.Condition() + count = [0] + def cb(_, size, mtime): + with lock: + count[0] += 1 + lock.notify() + + comp = self.ioctx.aio_stat("foo", cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + eq(comp.get_return_value(), -2) + + self.ioctx.write("foo", b"bar") + + comp = self.ioctx.aio_stat("foo", cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + + [i.remove() for i in self.ioctx.list_objects()] + + def _take_down_acting_set(self, pool, objectname): + # find 
acting_set for pool:objectname and take it down; used to + # verify that async reads don't complete while acting set is missing + cmd = { + "prefix":"osd map", + "pool":pool, + "object":objectname, + "format":"json", + } + r, jsonout, _ = self.rados.mon_command(json.dumps(cmd), b'') + objmap = json.loads(jsonout.decode("utf-8")) + acting_set = objmap['acting'] + cmd = {"prefix":"osd set", "key":"noup"} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + cmd = {"prefix":"osd down", "ids":[str(i) for i in acting_set]} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + + # wait for OSDs to acknowledge the down + eq(self.rados.wait_for_latest_osdmap(), 0) + + def _let_osds_back_up(self): + cmd = {"prefix":"osd unset", "key":"noup"} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + + def test_aio_read(self): + # this is a list so that the local cb() can modify it + retval = [None] + lock = threading.Condition() + def cb(_, buf): + with lock: + retval[0] = buf + lock.notify() + payload = b"bar\000frob" + self.ioctx.write("foo", payload) + + # test1: use wait_for_complete() and wait for cb by + # watching retval[0] + self._take_down_acting_set('test_pool', 'foo') + comp = self.ioctx.aio_read("foo", len(payload), 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(None, retval[0]) + self._let_osds_back_up() + comp.wait_for_complete() + loops = 0 + with lock: + while retval[0] is None and loops <= 10: + lock.wait(timeout=5) + loops += 1 + assert(loops <= 10) + + eq(retval[0], payload) + eq(sys.getrefcount(comp), 2) + + # test2: use wait_for_complete_and_cb(), verify retval[0] is + # set by the time we regain control + + retval[0] = None + self._take_down_acting_set('test_pool', 'foo') + comp = self.ioctx.aio_read("foo", len(payload), 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(None, retval[0]) + 
self._let_osds_back_up() + + comp.wait_for_complete_and_cb() + assert(retval[0] is not None) + eq(retval[0], payload) + eq(sys.getrefcount(comp), 2) + + # test3: error case, use wait_for_complete_and_cb(), verify retval[0] is + # set by the time we regain control + + retval[0] = 1 + self._take_down_acting_set('test_pool', 'bar') + comp = self.ioctx.aio_read("bar", len(payload), 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(1, retval[0]) + self._let_osds_back_up() + + comp.wait_for_complete_and_cb() + eq(None, retval[0]) + assert(comp.get_return_value() < 0) + eq(sys.getrefcount(comp), 2) + + [i.remove() for i in self.ioctx.list_objects()] + + def test_lock(self): + self.ioctx.lock_exclusive("foo", "lock", "locker", "desc_lock", + 10000, 0) + assert_raises(ObjectExists, + self.ioctx.lock_exclusive, + "foo", "lock", "locker", "desc_lock", 10000, 0) + self.ioctx.unlock("foo", "lock", "locker") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker") + + self.ioctx.lock_shared("foo", "lock", "locker1", "tag", "desc_lock", + 10000, 0) + self.ioctx.lock_shared("foo", "lock", "locker2", "tag", "desc_lock", + 10000, 0) + assert_raises(ObjectBusy, + self.ioctx.lock_exclusive, + "foo", "lock", "locker3", "desc_lock", 10000, 0) + self.ioctx.unlock("foo", "lock", "locker1") + self.ioctx.unlock("foo", "lock", "locker2") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker1") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker2") + + def test_execute(self): + self.ioctx.write("foo", b"") # ensure object exists + + ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"") + eq(buf, b"Hello, world!") + + ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"nose") + eq(buf, b"Hello, nose!") + + def test_aio_execute(self): + count = [0] + retval = [None] + lock = threading.Condition() + def cb(_, buf): + with lock: + if retval[0] is None: + retval[0] 
= buf + count[0] += 1 + lock.notify() + self.ioctx.write("foo", b"") # ensure object exists + + comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"", 32, cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 13) + eq(retval[0], b"Hello, world!") + + retval[0] = None + comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"nose", 32, cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 4: + lock.wait() + eq(comp.get_return_value(), 12) + eq(retval[0], b"Hello, nose!") + + [i.remove() for i in self.ioctx.list_objects()] + + def test_applications(self): + cmd = {"prefix":"osd dump", "format":"json"} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'') + eq(ret, 0) + assert len(buf) > 0 + release = json.loads(buf.decode("utf-8")).get("require_osd_release", + None) + if not release or release[0] < 'l': + raise SkipTest + + eq([], self.ioctx.application_list()) + + self.ioctx.application_enable("app1") + assert_raises(Error, self.ioctx.application_enable, "app2") + self.ioctx.application_enable("app2", True) + + assert_raises(Error, self.ioctx.application_metadata_list, "dne") + eq([], self.ioctx.application_metadata_list("app1")) + + assert_raises(Error, self.ioctx.application_metadata_set, "dne", "key", + "key") + self.ioctx.application_metadata_set("app1", "key1", "val1") + self.ioctx.application_metadata_set("app1", "key2", "val2") + self.ioctx.application_metadata_set("app2", "key1", "val1") + + eq([("key1", "val1"), ("key2", "val2")], + self.ioctx.application_metadata_list("app1")) + + self.ioctx.application_metadata_remove("app1", "key1") + eq([("key2", "val2")], self.ioctx.application_metadata_list("app1")) + + def test_service_daemon(self): + name = "pid-" + str(os.getpid()) + metadata = {'version': '3.14', 'memory': '42'} + self.rados.service_daemon_register("laundry", name, metadata) + status = {'result': 'unknown', 'test': 'running'} + 
self.rados.service_daemon_update(status) + + def test_alignment(self): + eq(self.ioctx.alignment(), None) + + +class TestIoctxEc(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.pool = 'test-ec' + self.profile = 'testprofile-%s' % self.pool + cmd = {"prefix": "osd erasure-code-profile set", + "name": self.profile, "profile": ["k=2", "m=1", "crush-failure-domain=osd"]} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0, msg=out) + # create ec pool with profile created above + cmd = {'prefix': 'osd pool create', 'pg_num': 8, 'pgp_num': 8, + 'pool': self.pool, 'pool_type': 'erasure', + 'erasure_code_profile': self.profile} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0, msg=out) + assert self.rados.pool_exists(self.pool) + self.ioctx = self.rados.open_ioctx(self.pool) + + def tearDown(self): + cmd = {"prefix": "osd unset", "key": "noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool(self.pool) + self.rados.shutdown() + + def test_alignment(self): + eq(self.ioctx.alignment(), 8192) + + +class TestIoctx2(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + pool_id = self.rados.pool_lookup('test_pool') + assert pool_id > 0 + self.ioctx2 = self.rados.open_ioctx2(pool_id) + + def tearDown(self): + cmd = {"prefix": "osd unset", "key": "noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx2.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test_get_last_version(self): + version = self.ioctx2.get_last_version() + assert version >= 0 + + def test_get_stats(self): + stats = self.ioctx2.get_stats() + eq(stats, {'num_objects_unfound': 0, + 'num_objects_missing_on_primary': 0, + 'num_object_clones': 0, + 'num_objects': 0, + 'num_object_copies': 0, + 
'num_bytes': 0, + 'num_rd_kb': 0, + 'num_wr_kb': 0, + 'num_kb': 0, + 'num_wr': 0, + 'num_objects_degraded': 0, + 'num_rd': 0}) + + +class TestObject(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + self.ioctx.write('foo', b'bar') + self.object = Object(self.ioctx, 'foo') + + def tearDown(self): + self.ioctx.close() + self.ioctx = None + self.rados.delete_pool('test_pool') + self.rados.shutdown() + self.rados = None + + def test_read(self): + eq(self.object.read(3), b'bar') + eq(self.object.read(100), b'') + + def test_seek(self): + self.object.write(b'blah') + self.object.seek(0) + eq(self.object.read(4), b'blah') + self.object.seek(1) + eq(self.object.read(3), b'lah') + + def test_write(self): + self.object.write(b'barbaz') + self.object.seek(0) + eq(self.object.read(3), b'bar') + eq(self.object.read(3), b'baz') + +class TestIoCtxSelfManagedSnaps(object): + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + + def tearDown(self): + cmd = {"prefix":"osd unset", "key":"noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test(self): + # cannot mix-and-match pool and self-managed snapshot mode + self.ioctx.set_self_managed_snap_write([]) + self.ioctx.write('abc', b'abc') + snap_id_1 = self.ioctx.create_self_managed_snap() + self.ioctx.set_self_managed_snap_write([snap_id_1]) + + self.ioctx.write('abc', b'def') + snap_id_2 = self.ioctx.create_self_managed_snap() + self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2]) + + self.ioctx.write('abc', b'ghi') + + self.ioctx.rollback_self_managed_snap('abc', snap_id_1) + eq(self.ioctx.read('abc'), 
b'abc') + + self.ioctx.rollback_self_managed_snap('abc', snap_id_2) + eq(self.ioctx.read('abc'), b'def') + + self.ioctx.remove_self_managed_snap(snap_id_1) + self.ioctx.remove_self_managed_snap(snap_id_2) + +class TestCommand(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + + def tearDown(self): + self.rados.shutdown() + + def test_monmap_dump(self): + + # check for success and some plain output with epoch in it + cmd = {"prefix":"mon dump"} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + assert len(buf) > 0 + assert(b'epoch' in buf) + + # JSON, and grab current epoch + cmd['format'] = 'json' + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + assert len(buf) > 0 + d = json.loads(buf.decode("utf-8")) + assert('epoch' in d) + epoch = d['epoch'] + + # assume epoch + 1000 does not exist; test for ENOENT + cmd['epoch'] = epoch + 1000 + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, -errno.ENOENT) + eq(len(buf), 0) + del cmd['epoch'] + + # send to specific target by name + target = d['mons'][0]['name'] + print(target) + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, + target=target) + eq(ret, 0) + assert len(buf) > 0 + d = json.loads(buf.decode("utf-8")) + assert('epoch' in d) + + # and by rank + target = d['mons'][0]['rank'] + print(target) + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, + target=target) + eq(ret, 0) + assert len(buf) > 0 + d = json.loads(buf.decode("utf-8")) + assert('epoch' in d) + + def test_osd_bench(self): + cmd = dict(prefix='bench', size=4096, count=8192) + ret, buf, err = self.rados.osd_command(0, json.dumps(cmd), b'', + timeout=30) + eq(ret, 0) + assert len(buf) > 0 + out = json.loads(buf.decode('utf-8')) + eq(out['blocksize'], cmd['size']) + eq(out['bytes_written'], cmd['count']) + + def test_ceph_osd_pool_create_utf8(self): + if 
_python2: + # Use encoded bytestring + poolname = b"\351\273\205" + else: + poolname = "\u9ec5" + + cmd = {"prefix": "osd pool create", "pg_num": 16, "pool": poolname} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') + eq(ret, 0) + assert len(out) > 0 + eq(u"pool '\u9ec5' created", out) diff --git a/src/test/pybind/test_rbd.py b/src/test/pybind/test_rbd.py new file mode 100644 index 00000000..0fe72e96 --- /dev/null +++ b/src/test/pybind/test_rbd.py @@ -0,0 +1,2310 @@ +# vim: expandtab smarttab shiftwidth=4 softtabstop=4 +import base64 +import errno +import functools +import json +import socket +import os +import time +import sys + +from datetime import datetime, timedelta +from nose import with_setup, SkipTest +from nose.tools import eq_ as eq, assert_raises, assert_not_equal +from rados import (Rados, + LIBRADOS_OP_FLAG_FADVISE_DONTNEED, + LIBRADOS_OP_FLAG_FADVISE_NOCACHE, + LIBRADOS_OP_FLAG_FADVISE_RANDOM) +from rbd import (RBD, Group, Image, ImageNotFound, InvalidArgument, ImageExists, + ImageBusy, ImageHasSnapshots, ReadOnlyImage, + FunctionNotSupported, ArgumentOutOfRange, + ECANCELED, OperationCanceled, + DiskQuotaExceeded, ConnectionShutdown, PermissionError, + RBD_FEATURE_LAYERING, RBD_FEATURE_STRIPINGV2, + RBD_FEATURE_EXCLUSIVE_LOCK, RBD_FEATURE_JOURNALING, + RBD_MIRROR_MODE_DISABLED, RBD_MIRROR_MODE_IMAGE, + RBD_MIRROR_MODE_POOL, RBD_MIRROR_IMAGE_ENABLED, + RBD_MIRROR_IMAGE_DISABLED, MIRROR_IMAGE_STATUS_STATE_UNKNOWN, + RBD_LOCK_MODE_EXCLUSIVE, RBD_OPERATION_FEATURE_GROUP, + RBD_SNAP_NAMESPACE_TYPE_TRASH, + RBD_IMAGE_MIGRATION_STATE_PREPARED, RBD_CONFIG_SOURCE_CONFIG, + RBD_CONFIG_SOURCE_POOL, RBD_CONFIG_SOURCE_IMAGE, + RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, + RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, + RBD_MIRROR_PEER_DIRECTION_RX) + +rados = None +ioctx = None +features = None +image_idx = 0 +group_idx = 0 +snap_idx = 0 +image_name = None +group_name = None +snap_name = None +pool_idx = 0 +pool_name = None +IMG_SIZE = 8 << 20 # 8 MiB 
+IMG_ORDER = 22 # 4 MiB objects + +os.environ["RBD_FORCE_ALLOW_V1"] = "1" + +def setup_module(): + global rados + rados = Rados(conffile='') + rados.connect() + global pool_name + pool_name = get_temp_pool_name() + rados.create_pool(pool_name) + global ioctx + ioctx = rados.open_ioctx(pool_name) + RBD().pool_init(ioctx, True) + global features + features = os.getenv("RBD_FEATURES") + features = int(features) if features is not None else 61 + +def teardown_module(): + global ioctx + ioctx.close() + global rados + rados.delete_pool(pool_name) + rados.shutdown() + +def get_temp_pool_name(): + global pool_idx + pool_idx += 1 + return "test-rbd-api-" + socket.gethostname() + '-' + str(os.getpid()) + \ + '-' + str(pool_idx) + +def get_temp_image_name(): + global image_idx + image_idx += 1 + return "image" + str(image_idx) + +def get_temp_group_name(): + global group_idx + group_idx += 1 + return "group" + str(group_idx) + +def get_temp_snap_name(): + global snap_idx + snap_idx += 1 + return "snap" + str(snap_idx) + +def create_image(): + global image_name + image_name = get_temp_image_name() + if features is not None: + RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=False, + features=int(features)) + else: + RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=True) + return image_name + +def remove_image(): + if image_name is not None: + RBD().remove(ioctx, image_name) + +def create_group(): + global group_name + group_name = get_temp_group_name() + RBD().group_create(ioctx, group_name) + +def remove_group(): + if group_name is not None: + RBD().group_remove(ioctx, group_name) + +def rename_group(): + new_group_name = "new" + group_name + RBD().group_rename(ioctx, group_name, new_group_name) + +def require_new_format(): + def wrapper(fn): + def _require_new_format(*args, **kwargs): + global features + if features is None: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_require_new_format) + return wrapper + +def 
require_features(required_features): + def wrapper(fn): + def _require_features(*args, **kwargs): + global features + if features is None: + raise SkipTest + for feature in required_features: + if feature & features != feature: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_require_features) + return wrapper + +def blacklist_features(blacklisted_features): + def wrapper(fn): + def _blacklist_features(*args, **kwargs): + global features + for feature in blacklisted_features: + if features is not None and feature & features == feature: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_blacklist_features) + return wrapper + +def test_version(): + RBD().version() + +def test_create(): + create_image() + remove_image() + +def check_default_params(format, order=None, features=None, stripe_count=None, + stripe_unit=None, exception=None): + global rados + global ioctx + orig_vals = {} + for k in ['rbd_default_format', 'rbd_default_order', 'rbd_default_features', + 'rbd_default_stripe_count', 'rbd_default_stripe_unit']: + orig_vals[k] = rados.conf_get(k) + try: + rados.conf_set('rbd_default_format', str(format)) + if order is not None: + rados.conf_set('rbd_default_order', str(order or 0)) + if features is not None: + rados.conf_set('rbd_default_features', str(features or 0)) + if stripe_count is not None: + rados.conf_set('rbd_default_stripe_count', str(stripe_count or 0)) + if stripe_unit is not None: + rados.conf_set('rbd_default_stripe_unit', str(stripe_unit or 0)) + feature_data_pool = 0 + datapool = rados.conf_get('rbd_default_data_pool') + if not len(datapool) == 0: + feature_data_pool = 128 + image_name = get_temp_image_name() + if exception is None: + RBD().create(ioctx, image_name, IMG_SIZE, old_format=(format == 1)) + try: + with Image(ioctx, image_name) as image: + eq(format == 1, image.old_format()) + + expected_order = int(rados.conf_get('rbd_default_order')) + actual_order = image.stat()['order'] + 
eq(expected_order, actual_order) + + expected_features = features + if format == 1: + expected_features = 0 + elif expected_features is None: + expected_features = 61 | feature_data_pool + else: + expected_features |= feature_data_pool + eq(expected_features, image.features()) + + expected_stripe_count = stripe_count + if not expected_stripe_count or format == 1 or \ + features & RBD_FEATURE_STRIPINGV2 == 0: + expected_stripe_count = 1 + eq(expected_stripe_count, image.stripe_count()) + + expected_stripe_unit = stripe_unit + if not expected_stripe_unit or format == 1 or \ + features & RBD_FEATURE_STRIPINGV2 == 0: + expected_stripe_unit = 1 << actual_order + eq(expected_stripe_unit, image.stripe_unit()) + finally: + RBD().remove(ioctx, image_name) + else: + assert_raises(exception, RBD().create, ioctx, image_name, IMG_SIZE) + finally: + for k, v in orig_vals.items(): + rados.conf_set(k, v) + +def test_create_defaults(): + # basic format 1 and 2 + check_default_params(1) + check_default_params(2) + # invalid order + check_default_params(1, 0, exception=ArgumentOutOfRange) + check_default_params(2, 0, exception=ArgumentOutOfRange) + check_default_params(1, 11, exception=ArgumentOutOfRange) + check_default_params(2, 11, exception=ArgumentOutOfRange) + check_default_params(1, 65, exception=ArgumentOutOfRange) + check_default_params(2, 65, exception=ArgumentOutOfRange) + # striping and features are ignored for format 1 + check_default_params(1, 20, 0, 1, 1) + check_default_params(1, 20, 3, 1, 1) + check_default_params(1, 20, 0, 0, 0) + # striping is ignored if stripingv2 is not set + check_default_params(2, 20, 0, 1, 1 << 20) + check_default_params(2, 20, RBD_FEATURE_LAYERING, 1, 1 << 20) + check_default_params(2, 20, 0, 0, 0) + # striping with stripingv2 is fine + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 1, 1 << 16) + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 20) + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 16) + 
check_default_params(2, 20, 0, 0, 0) + # make sure invalid combinations of stripe unit and order are still invalid + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 1 << 50, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 100, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 0, 1, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 1, 0, exception=InvalidArgument) + # 0 stripe unit and count are still ignored + check_default_params(2, 22, 0, 0, 0) + +def test_context_manager(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE) + with Image(ioctx, image_name) as image: + data = rand_data(256) + image.write(data, 0) + read = image.read(0, 256) + RBD().remove(ioctx, image_name) + eq(data, read) + +def test_open_read_only(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE) + data = rand_data(256) + with Image(ioctx, image_name) as image: + image.write(data, 0) + image.create_snap('snap') + with Image(ioctx, image_name, read_only=True) as image: + read = image.read(0, 256) + eq(data, read) + assert_raises(ReadOnlyImage, image.write, data, 0) + assert_raises(ReadOnlyImage, image.create_snap, 'test') + assert_raises(ReadOnlyImage, image.remove_snap, 'snap') + assert_raises(ReadOnlyImage, image.rollback_to_snap, 'snap') + assert_raises(ReadOnlyImage, image.protect_snap, 'snap') + assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') + assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') + assert_raises(ReadOnlyImage, image.flatten) + with Image(ioctx, image_name) as image: + image.remove_snap('snap') + RBD().remove(ioctx, image_name) + eq(data, read) + +def test_open_dne(): + for i in range(100): + image_name = 
get_temp_image_name() + assert_raises(ImageNotFound, Image, ioctx, image_name + 'dne') + assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap') + +def test_open_readonly_dne(): + for i in range(100): + image_name = get_temp_image_name() + assert_raises(ImageNotFound, Image, ioctx, image_name + 'dne', + read_only=True) + assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap', + read_only=True) + +@require_new_format() +def test_open_by_id(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE) + with Image(ioctx, image_name) as image: + image_id = image.id() + with Image(ioctx, image_id=image_id) as image: + eq(image.get_name(), image_name) + RBD().remove(ioctx, image_name) + +def test_remove_dne(): + assert_raises(ImageNotFound, remove_image) + +def test_list_empty(): + eq([], RBD().list(ioctx)) + +@with_setup(create_image, remove_image) +def test_list(): + eq([image_name], RBD().list(ioctx)) + + with Image(ioctx, image_name) as image: + image_id = image.id() + eq([{'id': image_id, 'name': image_name}], list(RBD().list2(ioctx))) + +@with_setup(create_image) +def test_remove_with_progress(): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + RBD().remove(ioctx, image_name, on_progress=progress_cb) + eq(True, d['received_callback']) + +@with_setup(create_image) +def test_remove_canceled(): + def progress_cb(current, total): + return -ECANCELED + + assert_raises(OperationCanceled, RBD().remove, ioctx, image_name, + on_progress=progress_cb) + +@with_setup(create_image, remove_image) +def test_rename(): + rbd = RBD() + image_name2 = get_temp_image_name() + rbd.rename(ioctx, image_name, image_name2) + eq([image_name2], rbd.list(ioctx)) + rbd.rename(ioctx, image_name2, image_name) + eq([image_name], rbd.list(ioctx)) + +def test_pool_metadata(): + rbd = RBD() + metadata = 
list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), 0) + assert_raises(KeyError, rbd.pool_metadata_get, ioctx, "key1") + rbd.pool_metadata_set(ioctx, "key1", "value1") + rbd.pool_metadata_set(ioctx, "key2", "value2") + value = rbd.pool_metadata_get(ioctx, "key1") + eq(value, "value1") + value = rbd.pool_metadata_get(ioctx, "key2") + eq(value, "value2") + metadata = list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), 2) + rbd.pool_metadata_remove(ioctx, "key1") + metadata = list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), 1) + eq(metadata[0], ("key2", "value2")) + rbd.pool_metadata_remove(ioctx, "key2") + assert_raises(KeyError, rbd.pool_metadata_remove, ioctx, "key2") + metadata = list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), 0) + + N = 65 + for i in range(N): + rbd.pool_metadata_set(ioctx, "key" + str(i), "X" * 1025) + metadata = list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), N) + for i in range(N): + rbd.pool_metadata_remove(ioctx, "key" + str(i)) + metadata = list(rbd.pool_metadata_list(ioctx)) + eq(len(metadata), N - i - 1) + +def test_config_list(): + rbd = RBD() + + for option in rbd.config_list(ioctx): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + rbd.pool_metadata_set(ioctx, "conf_rbd_cache", "true") + + for option in rbd.config_list(ioctx): + if option['name'] == "rbd_cache": + eq(option['source'], RBD_CONFIG_SOURCE_POOL) + else: + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + rbd.pool_metadata_remove(ioctx, "conf_rbd_cache") + + for option in rbd.config_list(ioctx): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + +def test_namespaces(): + rbd = RBD() + + eq(False, rbd.namespace_exists(ioctx, 'ns1')) + eq([], rbd.namespace_list(ioctx)) + assert_raises(ImageNotFound, rbd.namespace_remove, ioctx, 'ns1') + + rbd.namespace_create(ioctx, 'ns1') + eq(True, rbd.namespace_exists(ioctx, 'ns1')) + + assert_raises(ImageExists, rbd.namespace_create, ioctx, 'ns1') + eq(['ns1'], rbd.namespace_list(ioctx)) + 
rbd.namespace_remove(ioctx, 'ns1') + eq([], rbd.namespace_list(ioctx)) + +@require_new_format() +def test_pool_stats(): + rbd = RBD() + + try: + image1 = create_image() + image2 = create_image() + image3 = create_image() + image4 = create_image() + with Image(ioctx, image4) as image: + image.create_snap('snap') + image.resize(0) + + stats = rbd.pool_stats_get(ioctx) + eq(stats['image_count'], 4) + eq(stats['image_provisioned_bytes'], 3 * IMG_SIZE) + eq(stats['image_max_provisioned_bytes'], 4 * IMG_SIZE) + eq(stats['image_snap_count'], 1) + eq(stats['trash_count'], 0) + eq(stats['trash_provisioned_bytes'], 0) + eq(stats['trash_max_provisioned_bytes'], 0) + eq(stats['trash_snap_count'], 0) + finally: + rbd.remove(ioctx, image1) + rbd.remove(ioctx, image2) + rbd.remove(ioctx, image3) + with Image(ioctx, image4) as image: + image.remove_snap('snap') + rbd.remove(ioctx, image4) + +def rand_data(size): + return os.urandom(size) + +def check_stat(info, size, order): + assert 'block_name_prefix' in info + eq(info['size'], size) + eq(info['order'], order) + eq(info['num_objs'], size // (1 << order)) + eq(info['obj_size'], 1 << order) + +class TestImage(object): + + def setUp(self): + self.rbd = RBD() + create_image() + self.image = Image(ioctx, image_name) + + def tearDown(self): + self.image.close() + remove_image() + self.image = None + + @require_new_format() + @blacklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def test_update_features(self): + features = self.image.features() + self.image.update_features(RBD_FEATURE_EXCLUSIVE_LOCK, True) + eq(features | RBD_FEATURE_EXCLUSIVE_LOCK, self.image.features()) + + @require_features([RBD_FEATURE_STRIPINGV2]) + def test_create_with_params(self): + global features + image_name = get_temp_image_name() + order = 20 + stripe_unit = 1 << 20 + stripe_count = 10 + self.rbd.create(ioctx, image_name, IMG_SIZE, order, + False, features, stripe_unit, stripe_count) + image = Image(ioctx, image_name) + info = image.stat() + check_stat(info, 
IMG_SIZE, order) + eq(image.features(), features) + eq(image.stripe_unit(), stripe_unit) + eq(image.stripe_count(), stripe_count) + image.close() + RBD().remove(ioctx, image_name) + + @require_new_format() + def test_id(self): + assert_not_equal(b'', self.image.id()) + + def test_block_name_prefix(self): + assert_not_equal(b'', self.image.block_name_prefix()) + + def test_create_timestamp(self): + timestamp = self.image.create_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_access_timestamp(self): + timestamp = self.image.access_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_modify_timestamp(self): + timestamp = self.image.modify_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_invalidate_cache(self): + self.image.write(b'abc', 0) + eq(b'abc', self.image.read(0, 3)) + self.image.invalidate_cache() + eq(b'abc', self.image.read(0, 3)) + + def test_stat(self): + info = self.image.stat() + check_stat(info, IMG_SIZE, IMG_ORDER) + + def test_flags(self): + flags = self.image.flags() + eq(0, flags) + + def test_image_auto_close(self): + image = Image(ioctx, image_name) + + def test_use_after_close(self): + self.image.close() + assert_raises(InvalidArgument, self.image.stat) + + def test_write(self): + data = rand_data(256) + self.image.write(data, 0) + + def test_write_with_fadvise_flags(self): + data = rand_data(256) + self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) + self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_NOCACHE) + + def test_write_zeroes(self): + data = rand_data(256) + self.image.write(data, 0) + self.image.write_zeroes(0, 256) + eq(self.image.read(256, 256), b'\0' * 256) + + def test_read(self): + data = self.image.read(0, 20) + eq(data, b'\0' * 20) + + def test_read_with_fadvise_flags(self): + data = self.image.read(0, 20, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) + eq(data, b'\0' * 
20) + data = self.image.read(0, 20, LIBRADOS_OP_FLAG_FADVISE_RANDOM) + eq(data, b'\0' * 20) + + def test_large_write(self): + data = rand_data(IMG_SIZE) + self.image.write(data, 0) + + def test_large_read(self): + data = self.image.read(0, IMG_SIZE) + eq(data, b'\0' * IMG_SIZE) + + def test_write_read(self): + data = rand_data(256) + offset = 50 + self.image.write(data, offset) + read = self.image.read(offset, 256) + eq(data, read) + + def test_read_bad_offset(self): + assert_raises(InvalidArgument, self.image.read, IMG_SIZE + 1, IMG_SIZE) + + def test_resize(self): + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + info = self.image.stat() + check_stat(info, new_size, IMG_ORDER) + + def test_resize_allow_shrink_False(self): + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + info = self.image.stat() + check_stat(info, new_size, IMG_ORDER) + assert_raises(InvalidArgument, self.image.resize, IMG_SIZE, False) + + def test_size(self): + eq(IMG_SIZE, self.image.size()) + self.image.create_snap('snap1') + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + eq(new_size, self.image.size()) + self.image.create_snap('snap2') + self.image.set_snap('snap2') + eq(new_size, self.image.size()) + self.image.set_snap('snap1') + eq(IMG_SIZE, self.image.size()) + self.image.set_snap(None) + eq(new_size, self.image.size()) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + + def test_resize_down(self): + new_size = IMG_SIZE // 2 + data = rand_data(256) + self.image.write(data, IMG_SIZE // 2); + self.image.resize(new_size) + self.image.resize(IMG_SIZE) + read = self.image.read(IMG_SIZE // 2, 256) + eq(b'\0' * 256, read) + + def test_resize_bytes(self): + new_size = IMG_SIZE // 2 - 5 + data = rand_data(256) + self.image.write(data, IMG_SIZE // 2 - 10); + self.image.resize(new_size) + self.image.resize(IMG_SIZE) + read = self.image.read(IMG_SIZE // 2 - 10, 5) + eq(data[:5], read) + read = self.image.read(IMG_SIZE // 2 - 5, 251) + eq(b'\0' * 251, read) 
    def _test_copy(self, features=None, order=None, stripe_unit=None,
                   stripe_count=None):
        """Copy the image, forwarding only the optional arguments that were
        supplied (each positional overload of Image.copy is exercised by a
        different caller), then verify the copied data and clean up."""
        global ioctx
        data = rand_data(256)
        self.image.write(data, 256)
        image_name = get_temp_image_name()
        if features is None:
            self.image.copy(ioctx, image_name)
        elif order is None:
            self.image.copy(ioctx, image_name, features)
        elif stripe_unit is None:
            self.image.copy(ioctx, image_name, features, order)
        elif stripe_count is None:
            self.image.copy(ioctx, image_name, features, order, stripe_unit)
        else:
            self.image.copy(ioctx, image_name, features, order, stripe_unit,
                            stripe_count)
        # Copying onto an existing destination image must fail.
        assert_raises(ImageExists, self.image.copy, ioctx, image_name)
        copy = Image(ioctx, image_name)
        copy_data = copy.read(256, 256)
        copy.close()
        self.rbd.remove(ioctx, image_name)
        eq(data, copy_data)

    def test_copy(self):
        """Copy with all defaults."""
        self._test_copy()

    def test_copy2(self):
        """Copy preserving the source's features and order."""
        self._test_copy(self.image.features(), self.image.stat()['order'])

    @require_features([RBD_FEATURE_STRIPINGV2])
    def test_copy3(self):
        """Copy preserving features, order and striping parameters."""
        global features
        self._test_copy(features, self.image.stat()['order'],
                        self.image.stripe_unit(), self.image.stripe_count())

    def test_deep_copy(self):
        """deep_copy must carry snapshots over: the destination keeps 'snap1'
        (pre-write data) even after the snap is removed from the source."""
        global ioctx
        global features
        self.image.write(b'a' * 256, 0)
        self.image.create_snap('snap1')
        self.image.write(b'b' * 256, 0)
        dst_name = get_temp_image_name()
        self.image.deep_copy(ioctx, dst_name, features=features,
                             order=self.image.stat()['order'],
                             stripe_unit=self.image.stripe_unit(),
                             stripe_count=self.image.stripe_count(),
                             data_pool=None)
        self.image.remove_snap('snap1')
        # Snapshot content was copied: 'snap1' on the copy still shows 'a's.
        with Image(ioctx, dst_name, 'snap1') as copy:
            copy_data = copy.read(0, 256)
            eq(b'a' * 256, copy_data)
        # Head of the copy shows the post-snapshot 'b's.
        with Image(ioctx, dst_name) as copy:
            copy_data = copy.read(0, 256)
            eq(b'b' * 256, copy_data)
            copy.remove_snap('snap1')
        self.rbd.remove(ioctx, dst_name)

    @require_features([RBD_FEATURE_LAYERING])
    def test_deep_copy_clone(self):
        """deep_copy of a clone: the destination keeps the clone's snapshot
        and resolves parent data ('a's written before 'snap1')."""
        global ioctx
        global features
        self.image.write(b'a' * 256, 0)
        self.image.create_snap('snap1')
        self.image.write(b'b' * 256, 0)
        self.image.protect_snap('snap1')
        clone_name = get_temp_image_name()
        dst_name = get_temp_image_name()
        self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name)
        with Image(ioctx, clone_name) as child:
            child.create_snap('snap1')
            child.deep_copy(ioctx, dst_name, features=features,
                            order=self.image.stat()['order'],
                            stripe_unit=self.image.stripe_unit(),
                            stripe_count=self.image.stripe_count(),
                            data_pool=None)
            child.remove_snap('snap1')

        with Image(ioctx, dst_name) as copy:
            copy_data = copy.read(0, 256)
            # Data comes through from the parent at the 'snap1' point.
            eq(b'a' * 256, copy_data)
            copy.remove_snap('snap1')
        self.rbd.remove(ioctx, dst_name)
        self.rbd.remove(ioctx, clone_name)
        self.image.unprotect_snap('snap1')
        self.image.remove_snap('snap1')

    def test_create_snap(self):
        """A snapshot is point-in-time: data written after 'snap1' is not
        visible when the image is opened at that snapshot."""
        global ioctx
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        at_snapshot = Image(ioctx, image_name, 'snap1')
        snap_data = at_snapshot.read(0, 256)
        at_snapshot.close()
        eq(snap_data, b'\0' * 256)
        self.image.remove_snap('snap1')

    def test_list_snaps(self):
        """list_snaps reflects snapshot creation order and removals."""
        eq([], list(self.image.list_snaps()))
        self.image.create_snap('snap1')
        eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()])
        self.image.create_snap('snap2')
        eq(['snap1', 'snap2'], [snap['name'] for snap in self.image.list_snaps()])
        self.image.remove_snap('snap1')
        self.image.remove_snap('snap2')

    def test_list_snaps_iterator_auto_close(self):
        """Discarding the list_snaps iterator without consuming it must not
        leak or block the subsequent snapshot removal."""
        self.image.create_snap('snap1')
        self.image.list_snaps()
        self.image.remove_snap('snap1')

    def test_remove_snap(self):
        """remove_snap takes the snapshot back out of list_snaps."""
        eq([], list(self.image.list_snaps()))
        self.image.create_snap('snap1')
        eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()])
        self.image.remove_snap('snap1')
        eq([], list(self.image.list_snaps()))

    def test_rename_snap(self):
        """rename_snap replaces the old name in the snapshot listing."""
        eq([], list(self.image.list_snaps()))
        self.image.create_snap('snap1')
        eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()])
        self.image.rename_snap("snap1", "snap1-rename")
        eq(['snap1-rename'], [snap['name'] for snap in self.image.list_snaps()])
        self.image.remove_snap('snap1-rename')
        eq([], list(self.image.list_snaps()))

    @require_features([RBD_FEATURE_LAYERING])
    def test_protect_snap(self):
        """Protected snapshots cannot be removed until unprotected; protect
        operations on a nonexistent snapshot raise ImageNotFound."""
        self.image.create_snap('snap1')
        assert(not self.image.is_protected_snap('snap1'))
        self.image.protect_snap('snap1')
        assert(self.image.is_protected_snap('snap1'))
        assert_raises(ImageBusy, self.image.remove_snap, 'snap1')
        self.image.unprotect_snap('snap1')
        assert(not self.image.is_protected_snap('snap1'))
        self.image.remove_snap('snap1')
        assert_raises(ImageNotFound, self.image.unprotect_snap, 'snap1')
        assert_raises(ImageNotFound, self.image.is_protected_snap, 'snap1')

    def test_snap_timestamp(self):
        """get_snap_timestamp returns a plausible (non-epoch) datetime for a
        snapshot id obtained from list_snaps."""
        self.image.create_snap('snap1')
        eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()])
        # Only one snapshot exists, so the loop just captures its id.
        for snap in self.image.list_snaps():
            snap_id = snap["id"]
        time = self.image.get_snap_timestamp(snap_id)
        assert_not_equal(b'', time.year)
        assert_not_equal(0, time.year)
        assert_not_equal(time.year, '1970')
        self.image.remove_snap('snap1')

    def test_limit_snaps(self):
        """A snapshot limit of 2 rejects the third snap with
        DiskQuotaExceeded; removing the limit allows it again."""
        self.image.set_snap_limit(2)
        eq(2, self.image.get_snap_limit())
        self.image.create_snap('snap1')
        self.image.create_snap('snap2')
        assert_raises(DiskQuotaExceeded, self.image.create_snap, 'snap3')
        self.image.remove_snap_limit()
        self.image.create_snap('snap3')

        self.image.remove_snap('snap1')
        self.image.remove_snap('snap2')
        self.image.remove_snap('snap3')

    @require_features([RBD_FEATURE_EXCLUSIVE_LOCK])
    def test_remove_with_exclusive_lock(self):
        """The open handle from setUp holds the exclusive lock, so removal
        fails with ImageBusy."""
        assert_raises(ImageBusy, remove_image)

    @blacklist_features([RBD_FEATURE_EXCLUSIVE_LOCK])
    def test_remove_with_snap(self):
        """An image with snapshots cannot be removed."""
        self.image.create_snap('snap1')
        assert_raises(ImageHasSnapshots, remove_image)
        self.image.remove_snap('snap1')

    @blacklist_features([RBD_FEATURE_EXCLUSIVE_LOCK])
    def test_remove_with_watcher(self):
        """The open handle registers a watch, so removal fails with
        ImageBusy while the image is still open."""
        data = rand_data(256)
        self.image.write(data, 0)
        assert_raises(ImageBusy, remove_image)
        read = self.image.read(0, 256)
        eq(read, data)

    def test_rollback_to_snap(self):
        """rollback_to_snap restores the zeroed pre-snapshot contents."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.rollback_to_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        self.image.remove_snap('snap1')

    def test_rollback_to_snap_sparse(self):
        """Same as test_rollback_to_snap, but the snapshot is taken before
        any write, so the rolled-back region was never allocated."""
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.rollback_to_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        self.image.remove_snap('snap1')

    def test_rollback_with_resize(self):
        """Rolling back across a resize restores both the data and the image
        size recorded at each snapshot."""
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, data)
        new_size = IMG_SIZE * 2
        self.image.resize(new_size)
        check_stat(self.image.stat(), new_size, IMG_ORDER)
        self.image.write(data, new_size - 256)
        self.image.create_snap('snap2')
        read = self.image.read(new_size - 256, 256)
        eq(read, data)
        # Back to snap1: image shrinks, so reads past IMG_SIZE are invalid.
        self.image.rollback_to_snap('snap1')
        check_stat(self.image.stat(), IMG_SIZE, IMG_ORDER)
        assert_raises(InvalidArgument, self.image.read, new_size - 256, 256)
        # Forward to snap2: size and tail data come back.
        self.image.rollback_to_snap('snap2')
        check_stat(self.image.stat(), new_size, IMG_ORDER)
        read = self.image.read(new_size - 256, 256)
        eq(read, data)
        self.image.remove_snap('snap1')
        self.image.remove_snap('snap2')

    def test_set_snap(self):
        """set_snap pins reads to the snapshot and makes the handle
        read-only for writes."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.set_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        assert_raises(ReadOnlyImage, self.image.write, data, 0)
        self.image.remove_snap('snap1')

    def test_set_no_snap(self):
        """set_snap(None) returns the handle to the writable head image."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.set_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        assert_raises(ReadOnlyImage, self.image.write, data, 0)
        self.image.set_snap(None)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.remove_snap('snap1')

    def test_set_snap_by_id(self):
        """Same as test_set_no_snap but addressing the snapshot by its
        numeric id instead of its name."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        snaps = list(self.image.list_snaps())
        self.image.set_snap_by_id(snaps[0]['id'])
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        assert_raises(ReadOnlyImage, self.image.write, data, 0)
        self.image.set_snap_by_id(None)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.remove_snap('snap1')

    def test_set_snap_sparse(self):
        """set_snap on a snapshot taken before any allocation still reads
        back zeroes."""
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.set_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        assert_raises(ReadOnlyImage, self.image.write, data, 0)
        self.image.remove_snap('snap1')

    def test_many_snaps(self):
        """Create 200 snapshots and verify they all list with the right
        name and size."""
        num_snaps = 200
        for i in range(num_snaps):
            self.image.create_snap(str(i))
        snaps = sorted(self.image.list_snaps(),
                       key=lambda snap: int(snap['name']))
        eq(len(snaps), num_snaps)
        for i, snap in enumerate(snaps):
            eq(snap['size'], IMG_SIZE)
            eq(snap['name'], str(i))
        for i in range(num_snaps):
            self.image.remove_snap(str(i))

    def test_set_snap_deleted(self):
        """Reading through a handle pinned to a deleted snapshot raises
        ImageNotFound; resetting to head recovers."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.set_snap('snap1')
        self.image.remove_snap('snap1')
        assert_raises(ImageNotFound, self.image.read, 0, 256)
        self.image.set_snap(None)
        read = self.image.read(0, 256)
        eq(read, data)

    def test_set_snap_recreated(self):
        """A recreated snapshot with the same name is a different snapshot:
        the pinned handle still fails until reset to head."""
        self.image.write(b'\0' * 256, 0)
        self.image.create_snap('snap1')
        read = self.image.read(0, 256)
        eq(read, b'\0' * 256)
        data = rand_data(256)
        self.image.write(data, 0)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.set_snap('snap1')
        self.image.remove_snap('snap1')
        self.image.create_snap('snap1')
        assert_raises(ImageNotFound, self.image.read, 0, 256)
        self.image.set_snap(None)
        read = self.image.read(0, 256)
        eq(read, data)
        self.image.remove_snap('snap1')

    def test_lock_unlock(self):
        """Advisory locking: exclusive and shared locks conflict as
        expected, keyed by cookie."""
        assert_raises(ImageNotFound, self.image.unlock, '')
        self.image.lock_exclusive('')
        assert_raises(ImageExists, self.image.lock_exclusive, '')
        assert_raises(ImageBusy, self.image.lock_exclusive, 'test')
        assert_raises(ImageExists, self.image.lock_shared, '', '')
        assert_raises(ImageBusy, self.image.lock_shared, 'foo', '')
        self.image.unlock('')

    def test_list_lockers(self):
        """list_lockers reports cookie/tag/exclusivity for one exclusive
        locker and for many shared lockers."""
        eq([], self.image.list_lockers())
        self.image.lock_exclusive('test')
        lockers = self.image.list_lockers()
        eq(1, len(lockers['lockers']))
        _, cookie, _ = lockers['lockers'][0]
        eq(cookie, 'test')
        eq('', lockers['tag'])
        assert lockers['exclusive']
        self.image.unlock('test')
        eq([], self.image.list_lockers())

        num_shared = 10
        for i in range(num_shared):
            self.image.lock_shared(str(i), 'tag')
        lockers = self.image.list_lockers()
        eq('tag', lockers['tag'])
        assert not lockers['exclusive']
        eq(num_shared, len(lockers['lockers']))
        cookies = sorted(map(lambda x: x[1], lockers['lockers']))
        for i in range(num_shared):
            eq(str(i), cookies[i])
            self.image.unlock(str(i))
        eq([], self.image.list_lockers())

    def test_diff_iterate(self):
        """diff_iterate reports allocated/changed extents; see check_diff
        for how the expected extents are compared."""
        check_diff(self.image, 0, IMG_SIZE, None, [])
        self.image.write(b'a' * 256, 0)
        check_diff(self.image, 0, IMG_SIZE, None, [(0, 256, True)])
        self.image.write(b'b' * 256, 256)
        check_diff(self.image, 0, IMG_SIZE, None, [(0, 512, True)])
        self.image.discard(128, 256)
        # NOTE(review): the discard does not change the expected extents
        # here — presumably sub-object discards don't deallocate; confirm
        # against librbd diff granularity.
        check_diff(self.image, 0, IMG_SIZE, None, [(0, 512, True)])

        self.image.create_snap('snap1')
        self.image.discard(0, 1 << IMG_ORDER)
        self.image.create_snap('snap2')
        self.image.set_snap('snap2')
        check_diff(self.image, 0, IMG_SIZE, 'snap1', [(0, 512, False)])
        self.image.remove_snap('snap1')
        self.image.remove_snap('snap2')

    def test_aio_read(self):
        """aio_read success and error paths; refcount checks ensure the
        completion does not leak references."""
        # this is a list so that the local cb() can modify it
        retval = [None]
        def cb(_, buf):
            retval[0] = buf

        # test1: success case
        comp = self.image.aio_read(0, 20, cb)
        comp.wait_for_complete_and_cb()
        eq(retval[0], b'\0' * 20)
        eq(comp.get_return_value(), 20)
        eq(sys.getrefcount(comp), 2)

        # test2: error case
        retval[0] = 1
        comp = self.image.aio_read(IMG_SIZE, 20, cb)
        comp.wait_for_complete_and_cb()
        eq(None, retval[0])
        assert(comp.get_return_value() < 0)
        eq(sys.getrefcount(comp), 2)

    def test_aio_write(self):
        """aio_write completes with rc 0 and the data is readable back."""
        retval = [None]
        def cb(comp):
            retval[0] = comp.get_return_value()

        data = rand_data(256)
        comp = self.image.aio_write(data, 256, cb)
        comp.wait_for_complete_and_cb()
        eq(retval[0], 0)
        eq(comp.get_return_value(), 0)
        eq(sys.getrefcount(comp), 2)
        eq(self.image.read(256, 256), data)

    def test_aio_discard(self):
        """aio_discard zeroes the discarded range."""
        retval = [None]
        def cb(comp):
            retval[0] = comp.get_return_value()

        data = rand_data(256)
        self.image.write(data, 0)
        comp = self.image.aio_discard(0, 256, cb)
        comp.wait_for_complete_and_cb()
        eq(retval[0], 0)
        eq(comp.get_return_value(), 0)
        eq(sys.getrefcount(comp), 2)
        # NOTE(review): the discard covered [0, 256) but the check reads
        # [256, 512), which was never written — verify intended offset.
        eq(self.image.read(256, 256), b'\0' * 256)

    def test_aio_write_zeroes(self):
        """aio_write_zeroes zeroes the requested range."""
        retval = [None]
        def cb(comp):
            retval[0] = comp.get_return_value()

        data = rand_data(256)
        self.image.write(data, 0)
        comp = self.image.aio_write_zeroes(0, 256, cb)
        comp.wait_for_complete_and_cb()
        eq(retval[0], 0)
        eq(comp.get_return_value(), 0)
        eq(sys.getrefcount(comp), 2)
        # NOTE(review): zeroing covered [0, 256) but the check reads
        # [256, 512) — same offset question as test_aio_discard.
        eq(self.image.read(256, 256), b'\0' * 256)

    def test_aio_flush(self):
        """aio_flush completes successfully with rc 0."""
        retval = [None]
        def cb(comp):
            retval[0] = comp.get_return_value()

        comp = self.image.aio_flush(cb)
        comp.wait_for_complete_and_cb()
        eq(retval[0], 0)
        eq(sys.getrefcount(comp), 2)

    def test_metadata(self):
        """Per-image metadata: set/get/list/remove round-trips, KeyError on
        missing keys, and bulk set/remove of 65 oversized values."""
        metadata = list(self.image.metadata_list())
        eq(len(metadata), 0)
        assert_raises(KeyError, self.image.metadata_get, "key1")
        self.image.metadata_set("key1", "value1")
        self.image.metadata_set("key2", "value2")
        value = self.image.metadata_get("key1")
        eq(value, "value1")
        value = self.image.metadata_get("key2")
        eq(value, "value2")
        metadata = list(self.image.metadata_list())
        eq(len(metadata), 2)
        self.image.metadata_remove("key1")
        metadata = list(self.image.metadata_list())
        eq(len(metadata), 1)
        eq(metadata[0], ("key2", "value2"))
        self.image.metadata_remove("key2")
        assert_raises(KeyError, self.image.metadata_remove, "key2")
        metadata = list(self.image.metadata_list())
        eq(len(metadata), 0)

        # Values > 1024 bytes exercise any chunked listing path.
        N = 65
        for i in range(N):
            self.image.metadata_set("key" + str(i), "X" * 1025)
        metadata = list(self.image.metadata_list())
        eq(len(metadata), N)
        for i in range(N):
            self.image.metadata_remove("key" + str(i))
            metadata = list(self.image.metadata_list())
            eq(len(metadata), N - i - 1)

    def test_watchers_list(self):
        """watchers_list reports the watch registered by our open handle."""
        watchers = list(self.image.watchers_list())
        # The image is open (in r/w mode) from setup, so expect there to be one
        # watcher.
        eq(len(watchers), 1)

    def test_config_list(self):
        """An image-level conf_* metadata key overrides the config source
        reported by config_list for that option only."""
        with Image(ioctx, image_name) as image:
            for option in image.config_list():
                eq(option['source'], RBD_CONFIG_SOURCE_CONFIG)

            image.metadata_set("conf_rbd_cache", "true")

            for option in image.config_list():
                if option['name'] == "rbd_cache":
                    eq(option['source'], RBD_CONFIG_SOURCE_IMAGE)
                else:
                    eq(option['source'], RBD_CONFIG_SOURCE_CONFIG)

            image.metadata_remove("conf_rbd_cache")

            for option in image.config_list():
                eq(option['source'], RBD_CONFIG_SOURCE_CONFIG)

    def test_sparsify(self):
        """sparsify rejects a non-power-of-two-aligned size of 16 and
        accepts 4096."""
        assert_raises(InvalidArgument, self.image.sparsify, 16)
        self.image.sparsify(4096)

class TestImageId(object):
    """Tests exercising an Image handle opened by image id instead of by
    name (self.image2), against the same underlying image (self.image)."""

    def setUp(self):
        self.rbd = RBD()
        create_image()
        self.image = Image(ioctx, image_name)
        # Open a second handle addressed purely by id.
        self.image2 = Image(ioctx, None, None, False, self.image.id())

    def tearDown(self):
        self.image.close()
        self.image2.close()
        remove_image()
        self.image = None
        self.image2 = None

    def test_read(self):
        """Reads through the by-id handle see the (zero-filled) image."""
        data = self.image2.read(0, 20)
        eq(data, b'\0' * 20)

    def test_write(self):
        """Writes through the by-id handle succeed."""
        data = rand_data(256)
        self.image2.write(data, 0)

    def test_resize(self):
        """Resizes through the by-id handle are reflected in stat()."""
        new_size = IMG_SIZE * 2
        self.image2.resize(new_size)
        info = self.image2.stat()
        check_stat(info, new_size, IMG_ORDER)

def check_diff(image, offset, length, from_snapshot, expected):
    """Collect diff_iterate extents from `image` and compare to `expected`
    (a list of (offset, length, exists) tuples).

    NOTE(review): the `offset`, `length` and `from_snapshot` parameters are
    accepted but NOT forwarded — the call below hardcodes
    (0, IMG_SIZE, None). Callers such as test_diff_iterate pass 'snap1' as
    from_snapshot, so their expectations are written against the hardcoded
    behavior; confirm before "fixing" the forwarding.
    """
    extents = []
    def cb(offset, length, exists):
        extents.append((offset, length, exists))
    image.diff_iterate(0, IMG_SIZE, None, cb)
    eq(extents, expected)

class TestClone(object):
    """Tests of cloned (layered) images: setUp builds a parent with a
    protected 'snap1' and a clone of it."""

    @require_features([RBD_FEATURE_LAYERING])
    def setUp(self):
        global ioctx
        global features
        self.rbd = RBD()
        create_image()
        self.image = Image(ioctx, image_name)
        data = rand_data(256)
        self.image.write(data, IMG_SIZE // 2)
        self.image.create_snap('snap1')
        # NOTE(review): duplicate `global features` — already declared above.
        global features
        self.image.protect_snap('snap1')
        self.clone_name = get_temp_image_name()
        self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name,
                       features)
        self.clone = Image(ioctx, self.clone_name)

    def tearDown(self):
        # Remove the clone first; only then can snap1 be unprotected and
        # removed from the parent.
        global ioctx
        self.clone.close()
        self.rbd.remove(ioctx, self.clone_name)
        self.image.unprotect_snap('snap1')
        self.image.remove_snap('snap1')
        self.image.close()
        remove_image()

    def _test_with_params(self, features=None, order=None, stripe_unit=None,
                          stripe_count=None):
        """Clone from a fresh protected 'snap2', forwarding only the
        optional arguments that were supplied, then clean up."""
        self.image.create_snap('snap2')
        self.image.protect_snap('snap2')
        clone_name2 = get_temp_image_name()
        if features is None:
            self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2)
        elif order is None:
            self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2,
                           features)
        elif stripe_unit is None:
            self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2,
                           features, order)
        elif stripe_count is None:
            self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2,
                           features, order, stripe_unit)
        else:
            self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name2,
                           features, order, stripe_unit, stripe_count)
        self.rbd.remove(ioctx, clone_name2)
        self.image.unprotect_snap('snap2')
        self.image.remove_snap('snap2')

    def test_with_params(self):
        """Clone with all defaults."""
        self._test_with_params()

    def test_with_params2(self):
        """Clone preserving the parent's features and order."""
        global features
        self._test_with_params(features, self.image.stat()['order'])

    @require_features([RBD_FEATURE_STRIPINGV2])
    def test_with_params3(self):
        """Clone preserving features, order and striping parameters."""
        global features
        self._test_with_params(features, self.image.stat()['order'],
                               self.image.stripe_unit(),
                               self.image.stripe_count())

    def test_unprotected(self):
        """With v1 clone format forced, cloning an unprotected snapshot
        must fail with InvalidArgument."""
        self.image.create_snap('snap2')
        global features
        clone_name2 = get_temp_image_name()
        rados.conf_set("rbd_default_clone_format", "1")
        assert_raises(InvalidArgument, self.rbd.clone, ioctx, image_name,
                      'snap2', ioctx, clone_name2, features)
        rados.conf_set("rbd_default_clone_format", "auto")
        self.image.remove_snap('snap2')
def test_unprotect_with_children(self): + global features + # can't remove a snapshot that has dependent clones + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + # validate parent info of clone created by TestClone.setUp + (pool, image, snap) = self.clone.parent_info() + eq(pool, pool_name) + eq(image, image_name) + eq(snap, 'snap1') + eq(self.image.id(), self.clone.parent_id()) + + # create a new pool... + pool_name2 = get_temp_pool_name() + rados.create_pool(pool_name2) + other_ioctx = rados.open_ioctx(pool_name2) + other_ioctx.application_enable('rbd') + + # ...with a clone of the same parent + other_clone_name = get_temp_image_name() + rados.conf_set("rbd_default_clone_format", "1") + self.rbd.clone(ioctx, image_name, 'snap1', other_ioctx, + other_clone_name, features) + rados.conf_set("rbd_default_clone_format", "auto") + self.other_clone = Image(other_ioctx, other_clone_name) + # validate its parent info + (pool, image, snap) = self.other_clone.parent_info() + eq(pool, pool_name) + eq(image, image_name) + eq(snap, 'snap1') + eq(self.image.id(), self.other_clone.parent_id()) + + # can't unprotect snap with children + assert_raises(ImageBusy, self.image.unprotect_snap, 'snap1') + + # 2 children, check that cannot remove the parent snap + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + # close and remove other pool's clone + self.other_clone.close() + self.rbd.remove(other_ioctx, other_clone_name) + + # check that we cannot yet remove the parent snap + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + other_ioctx.close() + rados.delete_pool(pool_name2) + + # unprotect, remove parent snap happen in cleanup, and should succeed + + def test_stat(self): + image_info = self.image.stat() + clone_info = self.clone.stat() + eq(clone_info['size'], image_info['size']) + eq(clone_info['size'], self.clone.overlap()) + + def test_resize_stat(self): + self.clone.resize(IMG_SIZE // 2) + image_info = self.image.stat() + clone_info = 
self.clone.stat() + eq(clone_info['size'], IMG_SIZE // 2) + eq(image_info['size'], IMG_SIZE) + eq(self.clone.overlap(), IMG_SIZE // 2) + + self.clone.resize(IMG_SIZE * 2) + image_info = self.image.stat() + clone_info = self.clone.stat() + eq(clone_info['size'], IMG_SIZE * 2) + eq(image_info['size'], IMG_SIZE) + eq(self.clone.overlap(), IMG_SIZE // 2) + + def test_resize_io(self): + parent_data = self.image.read(IMG_SIZE // 2, 256) + self.image.resize(0) + self.clone.resize(IMG_SIZE // 2 + 128) + child_data = self.clone.read(IMG_SIZE // 2, 128) + eq(child_data, parent_data[:128]) + self.clone.resize(IMG_SIZE) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data[:128] + (b'\0' * 128)) + self.clone.resize(IMG_SIZE // 2 + 1) + child_data = self.clone.read(IMG_SIZE // 2, 1) + eq(child_data, parent_data[0:1]) + self.clone.resize(0) + self.clone.resize(IMG_SIZE) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, b'\0' * 256) + + def test_read(self): + parent_data = self.image.read(IMG_SIZE // 2, 256) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data) + + def test_write(self): + parent_data = self.image.read(IMG_SIZE // 2, 256) + new_data = rand_data(256) + self.clone.write(new_data, IMG_SIZE // 2 + 256) + child_data = self.clone.read(IMG_SIZE // 2 + 256, 256) + eq(child_data, new_data) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data) + parent_data = self.image.read(IMG_SIZE // 2 + 256, 256) + eq(parent_data, b'\0' * 256) + + def check_children(self, expected): + actual = self.image.list_children() + # dedup for cache pools until + # http://tracker.ceph.com/issues/8187 is fixed + deduped = set([(pool_name, image[1]) for image in actual]) + eq(deduped, set(expected)) + + def check_children2(self, expected): + actual = [{k:v for k,v in x.items() if k in expected[0]} \ + for x in self.image.list_children2()] + eq(actual, expected) + + def check_descendants(self, 
expected): + eq(list(self.image.list_descendants()), expected) + + def get_image_id(self, ioctx, name): + with Image(ioctx, name) as image: + return image.id() + + def test_list_children(self): + global ioctx + global features + self.image.set_snap('snap1') + self.check_children([(pool_name, self.clone_name)]) + self.check_children2( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.check_descendants( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.clone.close() + self.rbd.remove(ioctx, self.clone_name) + eq(self.image.list_children(), []) + eq(list(self.image.list_children2()), []) + eq(list(self.image.list_descendants()), []) + + clone_name = get_temp_image_name() + '_' + expected_children = [] + expected_children2 = [] + for i in range(10): + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, + clone_name + str(i), features) + expected_children.append((pool_name, clone_name + str(i))) + expected_children2.append( + {'pool': pool_name, 'pool_namespace': '', + 'image': clone_name + str(i), 'trash': False, + 'id': self.get_image_id(ioctx, clone_name + str(i))}) + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + image6_id = self.get_image_id(ioctx, clone_name + str(5)) + RBD().trash_move(ioctx, clone_name + str(5), 0) + expected_children.remove((pool_name, clone_name + str(5))) + for item in expected_children2: + for k, v in item.items(): + if v == image6_id: + item["trash"] = True + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + RBD().trash_restore(ioctx, image6_id, clone_name + str(5)) + expected_children.append((pool_name, clone_name + str(5))) + for item in expected_children2: + for k, v in item.items(): 
+ if v == image6_id: + item["trash"] = False + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + for i in range(10): + self.rbd.remove(ioctx, clone_name + str(i)) + expected_children.remove((pool_name, clone_name + str(i))) + expected_children2.pop(0) + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + eq(self.image.list_children(), []) + eq(list(self.image.list_children2()), []) + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name, + features) + self.check_children([(pool_name, self.clone_name)]) + self.check_children2( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.check_descendants( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.clone = Image(ioctx, self.clone_name) + + def test_flatten_errors(self): + # test that we can't flatten a non-clone + assert_raises(InvalidArgument, self.image.flatten) + + # test that we can't flatten a snapshot + self.clone.create_snap('snap2') + self.clone.set_snap('snap2') + assert_raises(ReadOnlyImage, self.clone.flatten) + self.clone.remove_snap('snap2') + + def check_flatten_with_order(self, new_order): + global ioctx + global features + clone_name2 = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order) + #with Image(ioctx, 'clone2') as clone: + clone2 = Image(ioctx, clone_name2) + clone2.flatten() + eq(clone2.overlap(), 0) + clone2.close() + self.rbd.remove(ioctx, clone_name2) + + # flatten after resizing to non-block size + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order) + with Image(ioctx, clone_name2) as clone: + clone.resize(IMG_SIZE // 2 - 1) + 
clone.flatten() + eq(0, clone.overlap()) + self.rbd.remove(ioctx, clone_name2) + + # flatten after resizing to non-block size + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order) + with Image(ioctx, clone_name2) as clone: + clone.resize(IMG_SIZE // 2 + 1) + clone.flatten() + eq(clone.overlap(), 0) + self.rbd.remove(ioctx, clone_name2) + + def test_flatten_basic(self): + self.check_flatten_with_order(IMG_ORDER) + + def test_flatten_smaller_order(self): + self.check_flatten_with_order(IMG_ORDER - 2) + + def test_flatten_larger_order(self): + self.check_flatten_with_order(IMG_ORDER + 2) + + def test_flatten_drops_cache(self): + global ioctx + global features + clone_name2 = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, IMG_ORDER) + with Image(ioctx, clone_name2) as clone: + with Image(ioctx, clone_name2) as clone2: + # cache object non-existence + data = clone.read(IMG_SIZE // 2, 256) + clone2_data = clone2.read(IMG_SIZE // 2, 256) + eq(data, clone2_data) + clone.flatten() + assert_raises(ImageNotFound, clone.parent_info) + assert_raises(ImageNotFound, clone2.parent_info) + assert_raises(ImageNotFound, clone.parent_id) + assert_raises(ImageNotFound, clone2.parent_id) + after_flatten = clone.read(IMG_SIZE // 2, 256) + eq(data, after_flatten) + after_flatten = clone2.read(IMG_SIZE // 2, 256) + eq(data, after_flatten) + self.rbd.remove(ioctx, clone_name2) + + def test_flatten_multi_level(self): + self.clone.create_snap('snap2') + self.clone.protect_snap('snap2') + clone_name3 = get_temp_image_name() + self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, + features) + self.clone.flatten() + with Image(ioctx, clone_name3) as clone3: + clone3.flatten() + self.clone.unprotect_snap('snap2') + self.clone.remove_snap('snap2') + self.rbd.remove(ioctx, clone_name3) + + def test_flatten_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + 
d['received_callback'] = True + return 0 + + global ioctx + global features + clone_name = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name, + features, 0) + with Image(ioctx, clone_name) as clone: + clone.flatten(on_progress=progress_cb) + self.rbd.remove(ioctx, clone_name) + eq(True, d['received_callback']) + + def test_resize_flatten_multi_level(self): + self.clone.create_snap('snap2') + self.clone.protect_snap('snap2') + clone_name3 = get_temp_image_name() + self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, + features) + self.clone.resize(1) + orig_data = self.image.read(0, 256) + with Image(ioctx, clone_name3) as clone3: + clone3_data = clone3.read(0, 256) + eq(orig_data, clone3_data) + self.clone.flatten() + with Image(ioctx, clone_name3) as clone3: + clone3_data = clone3.read(0, 256) + eq(orig_data, clone3_data) + self.rbd.remove(ioctx, clone_name3) + self.clone.unprotect_snap('snap2') + self.clone.remove_snap('snap2') + + def test_trash_snapshot(self): + self.image.create_snap('snap2') + global features + clone_name = get_temp_image_name() + rados.conf_set("rbd_default_clone_format", "2") + self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name, features) + rados.conf_set("rbd_default_clone_format", "auto") + + self.image.remove_snap('snap2') + + snaps = [s for s in self.image.list_snaps() if s['name'] != 'snap1'] + eq([RBD_SNAP_NAMESPACE_TYPE_TRASH], [s['namespace'] for s in snaps]) + eq([{'original_name' : 'snap2'}], [s['trash'] for s in snaps]) + + self.rbd.remove(ioctx, clone_name) + eq([], [s for s in self.image.list_snaps() if s['name'] != 'snap1']) + +class TestExclusiveLock(object): + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def setUp(self): + global rados2 + rados2 = Rados(conffile='') + rados2.connect() + global ioctx2 + ioctx2 = rados2.open_ioctx(pool_name) + create_image() + + def tearDown(self): + remove_image() + global ioctx2 + ioctx2.close() + global rados2 + 
rados2.shutdown() + + def test_ownership(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image1.write(b'0'*256, 0) + eq(image1.is_exclusive_lock_owner(), True) + eq(image2.is_exclusive_lock_owner(), False) + + def test_snapshot_leadership(self): + with Image(ioctx, image_name) as image: + image.create_snap('snap') + eq(image.is_exclusive_lock_owner(), True) + try: + with Image(ioctx, image_name) as image: + image.write(b'0'*256, 0) + eq(image.is_exclusive_lock_owner(), True) + image.set_snap('snap') + eq(image.is_exclusive_lock_owner(), False) + with Image(ioctx, image_name, snapshot='snap') as image: + eq(image.is_exclusive_lock_owner(), False) + finally: + with Image(ioctx, image_name) as image: + image.remove_snap('snap') + + def test_read_only_leadership(self): + with Image(ioctx, image_name, read_only=True) as image: + eq(image.is_exclusive_lock_owner(), False) + + def test_follower_flatten(self): + with Image(ioctx, image_name) as image: + image.create_snap('snap') + image.protect_snap('snap') + try: + RBD().clone(ioctx, image_name, 'snap', ioctx, 'clone', features) + with Image(ioctx, 'clone') as image1, Image(ioctx2, 'clone') as image2: + data = rand_data(256) + image1.write(data, 0) + image2.flatten() + assert_raises(ImageNotFound, image1.parent_info) + assert_raises(ImageNotFound, image1.parent_id) + parent = True + for x in range(30): + try: + image2.parent_info() + except ImageNotFound: + parent = False + break + eq(False, parent) + finally: + RBD().remove(ioctx, 'clone') + with Image(ioctx, image_name) as image: + image.unprotect_snap('snap') + image.remove_snap('snap') + + def test_follower_resize(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image1.write(b'0'*256, 0) + for new_size in [IMG_SIZE * 2, IMG_SIZE // 2]: + image2.resize(new_size); + eq(new_size, image1.size()) + for x in range(30): + if new_size == image2.size(): + break + time.sleep(1) + eq(new_size, 
image2.size()) + + def test_follower_snap_create(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image2.create_snap('snap1') + image1.remove_snap('snap1') + + def test_follower_snap_rollback(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image1.create_snap('snap') + try: + assert_raises(ReadOnlyImage, image2.rollback_to_snap, 'snap') + image1.rollback_to_snap('snap') + finally: + image1.remove_snap('snap') + + def test_follower_discard(self): + global rados + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + data = rand_data(256) + image1.write(data, 0) + image2.discard(0, 256) + eq(image1.is_exclusive_lock_owner(), False) + eq(image2.is_exclusive_lock_owner(), True) + read = image2.read(0, 256) + if rados.conf_get('rbd_skip_partial_discard') == 'false': + eq(256 * b'\0', read) + else: + eq(data, read) + + def test_follower_write(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + data = rand_data(256) + image1.write(data, 0) + image2.write(data, IMG_SIZE // 2) + eq(image1.is_exclusive_lock_owner(), False) + eq(image2.is_exclusive_lock_owner(), True) + for offset in [0, IMG_SIZE // 2]: + read = image2.read(offset, 256) + eq(data, read) + def test_acquire_release_lock(self): + with Image(ioctx, image_name) as image: + image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + image.lock_release() + + def test_break_lock(self): + blacklist_rados = Rados(conffile='') + blacklist_rados.connect() + try: + blacklist_ioctx = blacklist_rados.open_ioctx(pool_name) + try: + rados2.conf_set('rbd_blacklist_on_break_lock', 'true') + with Image(ioctx2, image_name) as image, \ + Image(blacklist_ioctx, image_name) as blacklist_image: + + lock_owners = list(image.lock_get_owners()) + eq(0, len(lock_owners)) + + blacklist_image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + assert_raises(ReadOnlyImage, image.lock_acquire, + RBD_LOCK_MODE_EXCLUSIVE) + 
lock_owners = list(image.lock_get_owners()) + eq(1, len(lock_owners)) + eq(RBD_LOCK_MODE_EXCLUSIVE, lock_owners[0]['mode']) + image.lock_break(RBD_LOCK_MODE_EXCLUSIVE, + lock_owners[0]['owner']) + + assert_raises(ConnectionShutdown, + blacklist_image.is_exclusive_lock_owner) + + blacklist_rados.wait_for_latest_osdmap() + data = rand_data(256) + assert_raises(ConnectionShutdown, + blacklist_image.write, data, 0) + + image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + + try: + blacklist_image.close() + except ConnectionShutdown: + pass + finally: + blacklist_ioctx.close() + finally: + blacklist_rados.shutdown() + +class TestMirroring(object): + + @staticmethod + def check_info(info, global_id, state, primary=None): + eq(global_id, info['global_id']) + eq(state, info['state']) + if primary is not None: + eq(primary, info['primary']) + + def setUp(self): + self.rbd = RBD() + self.initial_mirror_mode = self.rbd.mirror_mode_get(ioctx) + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + create_image() + self.image = Image(ioctx, image_name) + + def tearDown(self): + self.image.close() + remove_image() + self.rbd.mirror_mode_set(ioctx, self.initial_mirror_mode) + + def test_site_name(self): + site_name = "us-west-1" + self.rbd.mirror_site_name_set(rados, site_name) + eq(site_name, self.rbd.mirror_site_name_get(rados)) + self.rbd.mirror_site_name_set(rados, "") + eq(rados.get_fsid(), self.rbd.mirror_site_name_get(rados)) + + def test_mirror_peer_bootstrap(self): + eq([], list(self.rbd.mirror_peer_list(ioctx))) + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_DISABLED) + assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_create, + ioctx); + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + token_b64 = self.rbd.mirror_peer_bootstrap_create(ioctx) + token = base64.b64decode(token_b64) + token_dict = json.loads(token) + eq(sorted(['fsid', 'client_id', 'key', 'mon_host']), + sorted(list(token_dict.keys()))) + + # requires different cluster + 
assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_import, + ioctx, RBD_MIRROR_PEER_DIRECTION_RX, token_b64) + + def test_mirror_peer(self): + eq([], list(self.rbd.mirror_peer_list(ioctx))) + cluster_name = "test_cluster" + client_name = "test_client" + uuid = self.rbd.mirror_peer_add(ioctx, cluster_name, client_name) + assert(uuid) + peer = { + 'uuid' : uuid, + 'cluster_name' : cluster_name, + 'client_name' : client_name, + } + eq([peer], list(self.rbd.mirror_peer_list(ioctx))) + cluster_name = "test_cluster1" + self.rbd.mirror_peer_set_cluster(ioctx, uuid, cluster_name) + client_name = "test_client1" + self.rbd.mirror_peer_set_client(ioctx, uuid, client_name) + peer = { + 'uuid' : uuid, + 'cluster_name' : cluster_name, + 'client_name' : client_name, + } + eq([peer], list(self.rbd.mirror_peer_list(ioctx))) + + attribs = { + RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST: 'host1', + RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY: 'abc' + } + self.rbd.mirror_peer_set_attributes(ioctx, uuid, attribs) + eq(attribs, self.rbd.mirror_peer_get_attributes(ioctx, uuid)) + + self.rbd.mirror_peer_remove(ioctx, uuid) + eq([], list(self.rbd.mirror_peer_list(ioctx))) + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, + RBD_FEATURE_JOURNALING]) + def test_mirror_image(self): + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) + self.image.mirror_image_disable(True) + info = self.image.mirror_image_get_info() + self.check_info(info, '', RBD_MIRROR_IMAGE_DISABLED, False) + + self.image.mirror_image_enable() + info = self.image.mirror_image_get_info() + global_id = info['global_id'] + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + fail = False + try: + self.image.mirror_image_disable(True) + except InvalidArgument: + fail = True + eq(True, fail) # Fails because of mirror mode pool + + self.image.mirror_image_demote() + info = self.image.mirror_image_get_info() + self.check_info(info, global_id, 
RBD_MIRROR_IMAGE_ENABLED, False) + + self.image.mirror_image_resync() + + self.image.mirror_image_promote(True) + info = self.image.mirror_image_get_info() + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + fail = False + try: + self.image.mirror_image_resync() + except InvalidArgument: + fail = True + eq(True, fail) # Fails because it is primary + + status = self.image.mirror_image_get_status() + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + info = status['info'] + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, + RBD_FEATURE_JOURNALING]) + def test_mirror_image_status(self): + info = self.image.mirror_image_get_info() + global_id = info['global_id'] + state = info['state'] + primary = info['primary'] + + status = self.image.mirror_image_get_status() + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + info = status['info'] + self.check_info(info, global_id, state, primary) + + images = list(self.rbd.mirror_image_status_list(ioctx)) + eq(1, len(images)) + status = images[0] + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + info = status['info'] + self.check_info(info, global_id, state) + + states = self.rbd.mirror_image_status_summary(ioctx) + eq([(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, 1)], states) + + assert_raises(ImageNotFound, self.image.mirror_image_get_instance_id) + instance_ids = list(self.rbd.mirror_image_instance_id_list(ioctx)) + eq(0, len(instance_ids)) + + N = 65 + for i in range(N): + self.rbd.create(ioctx, image_name + str(i), IMG_SIZE, IMG_ORDER, + old_format=False, features=int(features)) + images = list(self.rbd.mirror_image_status_list(ioctx)) + eq(N + 1, len(images)) + for i in range(N): + self.rbd.remove(ioctx, image_name + str(i)) + + +class TestTrash(object): 
+ + def setUp(self): + global rados2 + rados2 = Rados(conffile='') + rados2.connect() + global ioctx2 + ioctx2 = rados2.open_ioctx(pool_name) + + def tearDown(self): + global ioctx2 + ioctx2.close() + global rados2 + rados2.shutdown() + + def test_move(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + RBD().trash_remove(ioctx, image_id, True) + + def test_purge(self): + create_image() + with Image(ioctx, image_name) as image: + image_name1 = image_name + image_id1 = image.id() + + create_image() + with Image(ioctx, image_name) as image: + image_name2 = image_name + image_id2 = image.id() + + RBD().trash_move(ioctx, image_name1, 0) + RBD().trash_move(ioctx, image_name2, 1000) + RBD().trash_purge(ioctx, datetime.now()) + + entries = list(RBD().trash_list(ioctx)) + eq([image_id2], [x['id'] for x in entries]) + RBD().trash_remove(ioctx, image_id2, True) + + def test_remove_denied(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + assert_raises(PermissionError, RBD().trash_remove, ioctx, image_id) + RBD().trash_remove(ioctx, image_id, True) + + def test_remove(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 0) + RBD().trash_remove(ioctx, image_id) + + def test_remove_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 0) + RBD().trash_remove(ioctx, image_id, on_progress=progress_cb) + eq(True, d['received_callback']) + + def test_get(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + + info = RBD().trash_get(ioctx, image_id) + 
eq(image_id, info['id']) + eq(image_name, info['name']) + eq('USER', info['source']) + assert(info['deferment_end_time'] > info['deletion_time']) + + RBD().trash_remove(ioctx, image_id, True) + + def test_list(self): + create_image() + with Image(ioctx, image_name) as image: + image_id1 = image.id() + image_name1 = image_name + RBD().trash_move(ioctx, image_name, 1000) + + create_image() + with Image(ioctx, image_name) as image: + image_id2 = image.id() + image_name2 = image_name + RBD().trash_move(ioctx, image_name, 1000) + + entries = list(RBD().trash_list(ioctx)) + for e in entries: + if e['id'] == image_id1: + eq(e['name'], image_name1) + elif e['id'] == image_id2: + eq(e['name'], image_name2) + else: + assert False + eq(e['source'], 'USER') + assert e['deferment_end_time'] > e['deletion_time'] + + RBD().trash_remove(ioctx, image_id1, True) + RBD().trash_remove(ioctx, image_id2, True) + + def test_restore(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + RBD().trash_move(ioctx, image_name, 1000) + RBD().trash_restore(ioctx, image_id, image_name) + remove_image() + +def test_create_group(): + create_group() + remove_group() + +def test_rename_group(): + create_group() + if group_name is not None: + rename_group() + eq(["new" + group_name], RBD().group_list(ioctx)) + RBD().group_remove(ioctx, "new" + group_name) + else: + remove_group() + +def test_list_groups_empty(): + eq([], RBD().group_list(ioctx)) + +@with_setup(create_group, remove_group) +def test_list_groups(): + eq([group_name], RBD().group_list(ioctx)) + +@with_setup(create_group) +def test_list_groups_after_removed(): + remove_group() + eq([], RBD().group_list(ioctx)) + +class TestGroups(object): + + def setUp(self): + global snap_name + self.rbd = RBD() + create_image() + self.image_names = [image_name] + self.image = Image(ioctx, image_name) + + create_group() + snap_name = get_temp_snap_name() + self.group = Group(ioctx, group_name) + + def tearDown(self): + 
remove_group() + self.image = None + for name in self.image_names: + RBD().remove(ioctx, name) + + def test_group_image_add(self): + self.group.add_image(ioctx, image_name) + + def test_group_image_list_empty(self): + eq([], list(self.group.list_images())) + + def test_group_image_list(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + eq([image_name], [img['name'] for img in self.group.list_images()]) + + def test_group_image_list_move_to_trash(self): + eq([], list(self.group.list_images())) + with Image(ioctx, image_name) as image: + image_id = image.id() + self.group.add_image(ioctx, image_name) + eq([image_name], [img['name'] for img in self.group.list_images()]) + RBD().trash_move(ioctx, image_name, 0) + eq([], list(self.group.list_images())) + RBD().trash_restore(ioctx, image_id, image_name) + + def test_group_image_many_images(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + + for x in range(0, 20): + create_image() + self.image_names.append(image_name) + self.group.add_image(ioctx, image_name) + + self.image_names.sort() + answer = [img['name'] for img in self.group.list_images()] + answer.sort() + eq(self.image_names, answer) + + def test_group_image_remove(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + with Image(ioctx, image_name) as image: + eq(RBD_OPERATION_FEATURE_GROUP, + image.op_features() & RBD_OPERATION_FEATURE_GROUP) + group = image.group() + eq(group_name, group['name']) + + eq([image_name], [img['name'] for img in self.group.list_images()]) + self.group.remove_image(ioctx, image_name) + eq([], list(self.group.list_images())) + with Image(ioctx, image_name) as image: + eq(0, image.op_features() & RBD_OPERATION_FEATURE_GROUP) + + def test_group_snap(self): + global snap_name + eq([], list(self.group.list_snaps())) + self.group.create_snap(snap_name) + eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) 
+ + for snap in self.image.list_snaps(): + eq(rbd.RBD_SNAP_NAMESPACE_TYPE_GROUP, snap['namespace']) + info = snap['group'] + eq(group_name, info['group_name']) + eq(snap_name, info['group_snap_name']) + + self.group.remove_snap(snap_name) + eq([], list(self.group.list_snaps())) + + def test_group_snap_list_many(self): + global snap_name + eq([], list(self.group.list_snaps())) + snap_names = [] + for x in range(0, 20): + snap_names.append(snap_name) + self.group.create_snap(snap_name) + snap_name = get_temp_snap_name() + + snap_names.sort() + answer = [snap['name'] for snap in self.group.list_snaps()] + answer.sort() + eq(snap_names, answer) + + def test_group_snap_namespace(self): + global snap_name + eq([], list(self.group.list_snaps())) + self.group.add_image(ioctx, image_name) + self.group.create_snap(snap_name) + eq(1, len([snap['name'] for snap in self.image.list_snaps()])) + self.group.remove_image(ioctx, image_name) + self.group.remove_snap(snap_name) + eq([], list(self.group.list_snaps())) + + def test_group_snap_rename(self): + global snap_name + new_snap_name = "new" + snap_name + + eq([], list(self.group.list_snaps())) + self.group.create_snap(snap_name) + eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) + self.group.rename_snap(snap_name, new_snap_name) + eq([new_snap_name], [snap['name'] for snap in self.group.list_snaps()]) + self.group.remove_snap(new_snap_name) + eq([], list(self.group.list_snaps())) + + def test_group_snap_rollback(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + with Image(ioctx, image_name) as image: + image.write(b'\0' * 256, 0) + read = image.read(0, 256) + eq(read, b'\0' * 256) + + global snap_name + eq([], list(self.group.list_snaps())) + self.group.create_snap(snap_name) + eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) + + with Image(ioctx, image_name) as image: + data = rand_data(256) + image.write(data, 0) + read = image.read(0, 256) + 
eq(read, data) + + self.group.rollback_to_snap(snap_name) + with Image(ioctx, image_name) as image: + read = image.read(0, 256) + eq(read, b'\0' * 256) + + self.group.remove_image(ioctx, image_name) + eq([], list(self.group.list_images())) + self.group.remove_snap(snap_name) + eq([], list(self.group.list_snaps())) + +@with_setup(create_image, remove_image) +def test_rename(): + rbd = RBD() + image_name2 = get_temp_image_name() + +class TestMigration(object): + + def test_migration(self): + create_image() + RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, + order=23, stripe_unit=1<<23, stripe_count=1, + data_pool=None) + + status = RBD().migration_status(ioctx, image_name) + eq(image_name, status['source_image_name']) + eq(image_name, status['dest_image_name']) + eq(RBD_IMAGE_MIGRATION_STATE_PREPARED, status['state']) + + RBD().migration_execute(ioctx, image_name) + RBD().migration_commit(ioctx, image_name) + remove_image() + + def test_migration_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + create_image() + RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, + order=23, stripe_unit=1<<23, stripe_count=1, + data_pool=None) + RBD().migration_execute(ioctx, image_name, on_progress=progress_cb) + eq(True, d['received_callback']) + d['received_callback'] = False + + RBD().migration_commit(ioctx, image_name, on_progress=progress_cb) + eq(True, d['received_callback']) + remove_image() + + def test_migrate_abort(self): + create_image() + RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, + order=23, stripe_unit=1<<23, stripe_count=1, + data_pool=None) + RBD().migration_abort(ioctx, image_name) + remove_image() + + def test_migrate_abort_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + create_image() + 
RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, + order=23, stripe_unit=1<<23, stripe_count=1, + data_pool=None) + RBD().migration_abort(ioctx, image_name, on_progress=progress_cb) + eq(True, d['received_callback']) + remove_image() diff --git a/src/test/pybind/test_rgwfs.py b/src/test/pybind/test_rgwfs.py new file mode 100644 index 00000000..06a88d16 --- /dev/null +++ b/src/test/pybind/test_rgwfs.py @@ -0,0 +1,144 @@ +# vim: expandtab smarttab shiftwidth=4 softtabstop=4 +from nose.tools import assert_raises, assert_equal, with_setup +import rgw as librgwfs + +rgwfs = None +root_handler = None +root_dir_handler = None + + +def setup_module(): + global rgwfs + global root_handler + rgwfs = librgwfs.LibRGWFS("testid", "", "") + root_handler = rgwfs.mount() + + +def teardown_module(): + global rgwfs + rgwfs.shutdown() + + +def setup_test(): + global root_dir_handler + + names = [] + + try: + root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0) + except Exception: + root_dir_handler = rgwfs.mkdir(root_handler, b"bucket", 0) + + def cb(name, offset, flags): + names.append(name) + rgwfs.readdir(root_dir_handler, cb, 0, 0) + for name in names: + rgwfs.unlink(root_dir_handler, name, 0) + + +@with_setup(setup_test) +def test_version(): + rgwfs.version() + + +@with_setup(setup_test) +def test_fstat(): + stat = rgwfs.fstat(root_dir_handler) + assert(len(stat) == 13) + file_handler = rgwfs.create(root_dir_handler, b'file-1', 0) + stat = rgwfs.fstat(file_handler) + assert(len(stat) == 13) + rgwfs.close(file_handler) + + +@with_setup(setup_test) +def test_statfs(): + stat = rgwfs.statfs() + assert(len(stat) == 11) + + +@with_setup(setup_test) +def test_fsync(): + fd = rgwfs.create(root_dir_handler, b'file-1', 0) + rgwfs.write(fd, 0, b"asdf") + rgwfs.fsync(fd, 0) + rgwfs.write(fd, 4, b"qwer") + rgwfs.fsync(fd, 1) + rgwfs.close(fd) + + +@with_setup(setup_test) +def test_directory(): + dir_handler = rgwfs.mkdir(root_dir_handler, b"temp-directory", 
0) + rgwfs.close(dir_handler) + rgwfs.unlink(root_dir_handler, b"temp-directory") + + +@with_setup(setup_test) +def test_walk_dir(): + dirs = [b"dir-1", b"dir-2", b"dir-3"] + handles = [] + for i in dirs: + d = rgwfs.mkdir(root_dir_handler, i, 0) + handles.append(d) + entries = [] + + def cb(name, offset): + entries.append((name, offset)) + + offset, eof = rgwfs.readdir(root_dir_handler, cb, 0) + + for i in handles: + rgwfs.close(i) + + for name, _ in entries: + assert(name in dirs) + rgwfs.unlink(root_dir_handler, name) + + +@with_setup(setup_test) +def test_rename(): + file_handler = rgwfs.create(root_dir_handler, b"a", 0) + rgwfs.close(file_handler) + rgwfs.rename(root_dir_handler, b"a", root_dir_handler, b"b") + file_handler = rgwfs.open(root_dir_handler, b"b", 0) + rgwfs.fstat(file_handler) + rgwfs.close(file_handler) + rgwfs.unlink(root_dir_handler, b"b") + + +@with_setup(setup_test) +def test_open(): + assert_raises(librgwfs.ObjectNotFound, rgwfs.open, + root_dir_handler, b'file-1', 0) + assert_raises(librgwfs.ObjectNotFound, rgwfs.open, + root_dir_handler, b'file-1', 0) + fd = rgwfs.create(root_dir_handler, b'file-1', 0) + rgwfs.write(fd, 0, b"asdf") + rgwfs.close(fd) + fd = rgwfs.open(root_dir_handler, b'file-1', 0) + assert_equal(rgwfs.read(fd, 0, 4), b"asdf") + rgwfs.close(fd) + fd = rgwfs.open(root_dir_handler, b'file-1', 0) + rgwfs.write(fd, 0, b"aaaazxcv") + rgwfs.close(fd) + fd = rgwfs.open(root_dir_handler, b'file-1', 0) + assert_equal(rgwfs.read(fd, 4, 4), b"zxcv") + rgwfs.close(fd) + fd = rgwfs.open(root_dir_handler, b'file-1', 0) + assert_equal(rgwfs.read(fd, 0, 4), b"aaaa") + rgwfs.close(fd) + rgwfs.unlink(root_dir_handler, b"file-1") + + +@with_setup(setup_test) +def test_mount_unmount(): + global root_handler + global root_dir_handler + test_directory() + rgwfs.close(root_dir_handler) + rgwfs.close(root_handler) + rgwfs.unmount() + root_handler = rgwfs.mount() + root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0) + test_open() |