diff options
Diffstat (limited to 'src/test/pybind')
-rw-r--r-- | src/test/pybind/CMakeLists.txt | 4 | ||||
-rwxr-xr-x | src/test/pybind/test_ceph_argparse.py | 1334 | ||||
-rwxr-xr-x | src/test/pybind/test_ceph_daemon.py | 52 | ||||
-rw-r--r-- | src/test/pybind/test_cephfs.py | 906 | ||||
-rw-r--r-- | src/test/pybind/test_rados.py | 1538 | ||||
-rw-r--r-- | src/test/pybind/test_rbd.py | 2771 | ||||
-rw-r--r-- | src/test/pybind/test_rgwfs.py | 144 |
7 files changed, 6749 insertions, 0 deletions
diff --git a/src/test/pybind/CMakeLists.txt b/src/test/pybind/CMakeLists.txt new file mode 100644 index 000000000..b2f0552a4 --- /dev/null +++ b/src/test/pybind/CMakeLists.txt @@ -0,0 +1,4 @@ +add_ceph_test(test_ceph_daemon.py + ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_ceph_daemon.py) +add_ceph_test(test_ceph_argparse.py + ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_ceph_argparse.py) diff --git a/src/test/pybind/test_ceph_argparse.py b/src/test/pybind/test_ceph_argparse.py new file mode 100755 index 000000000..6b0d065db --- /dev/null +++ b/src/test/pybind/test_ceph_argparse.py @@ -0,0 +1,1334 @@ +#!/usr/bin/env python3 +# -*- mode:python; tab-width:4; indent-tabs-mode:nil; coding:utf-8 -*- +# vim: ts=4 sw=4 smarttab expandtab fileencoding=utf-8 +# +# Ceph - scalable distributed file system +# +# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com> +# Copyright (C) 2014 Red Hat <contact@redhat.com> +# +# Author: Loic Dachary <loic@dachary.org> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
from ceph_argparse import validate_command, parse_json_funcsigs, validate, \
    parse_funcsig, ArgumentError, ArgumentTooFew, ArgumentMissing, \
    ArgumentNumber, ArgumentValid

import os
import random
import re
import string
import sys
import unittest
try:
    # Python 2 leftover; on Python 3 this falls through to io.StringIO.
    from StringIO import StringIO
except ImportError:
    from io import StringIO


def get_command_descriptions(what):
    """Run the get_command_descriptions helper binary and return its stdout.

    The binary is looked up in $CEPH_BIN (default: the current directory)
    and invoked as ``get_command_descriptions --<what>``.
    """
    CEPH_BIN = os.environ.get('CEPH_BIN', ".")
    return os.popen(CEPH_BIN + "/get_command_descriptions " + "--" + what).read()


class ParseJsonFuncsigs(unittest.TestCase):
    def test_parse_json_funcsigs(self):
        commands = get_command_descriptions("all")
        cmd_json = parse_json_funcsigs(commands, 'cli')

        # syntax error https://github.com/ceph/ceph/pull/585
        commands = get_command_descriptions("pull585")
        self.assertRaises(TypeError, parse_json_funcsigs, commands, 'cli')


# Command-signature dictionary shared by every test case below.
sigdict = parse_json_funcsigs(get_command_descriptions("all"), 'cli')


class TestArgparse(unittest.TestCase):
    """Shared helpers for validate_command()-based argument checking."""

    def _assert_valid_command(self, args):
        """Assert that ``args`` matches some command signature."""
        result = validate_command(sigdict, args)
        self.assertNotIn(result, [{}, None])

    def check_1_natural_arg(self, prefix, command):
        """Command takes exactly one non-negative integer argument."""
        self._assert_valid_command([prefix, command, '1'])
        self.assertEqual({}, validate_command(sigdict, [prefix, command]))
        self.assertEqual({}, validate_command(sigdict, [prefix, command, '-1']))
        self.assertEqual({}, validate_command(sigdict, [prefix, command, '1',
                                                        '1']))

    def check_0_or_1_natural_arg(self, prefix, command):
        """Command takes at most one non-negative integer argument."""
        self._assert_valid_command([prefix, command, '1'])
        self._assert_valid_command([prefix, command])
        self.assertEqual({}, validate_command(sigdict, [prefix, command, '-1']))
        self.assertEqual({}, validate_command(sigdict, [prefix, command, '1',
                                                        '1']))

    def check_1_string_arg(self, prefix, command):
        """Command takes exactly one string argument."""
        self.assertEqual({}, validate_command(sigdict, [prefix, command]))
        self._assert_valid_command([prefix, command, 'string'])
        self.assertEqual({}, validate_command(sigdict, [prefix,
                                                        command,
                                                        'string',
                                                        'toomany']))

    def check_0_or_1_string_arg(self, prefix, command):
        """Command takes at most one string argument."""
        self._assert_valid_command([prefix, command, 'string'])
        self._assert_valid_command([prefix, command])
        self.assertEqual({}, validate_command(sigdict, [prefix,
                                                        command,
                                                        'string',
                                                        'toomany']))

    def check_1_or_more_string_args(self, prefix, command):
        """Command takes one or more string arguments."""
        self.assertEqual({}, validate_command(sigdict, [prefix,
                                                        command]))
        self._assert_valid_command([prefix, command, 'string'])
        self._assert_valid_command([prefix, command, 'string', 'more string'])

    def check_no_arg(self, prefix, command):
        """Command takes no arguments at all."""
        self._assert_valid_command([prefix, command])
        self.assertEqual({}, validate_command(sigdict, [prefix,
                                                        command,
                                                        'toomany']))

    def _capture_output(self, args, stdout=None, stderr=None):
        """Validate ``args``, optionally capturing stdout/stderr.

        ``stdout`` and ``stderr`` act as truthy flags; when set, the
        corresponding stream is captured and returned as a stripped
        string (otherwise the flag value is passed back unchanged).
        Returns ``(validate_command_result, stdout, stderr)``.
        """
        saved_stdout, saved_stderr = sys.stdout, sys.stderr
        try:
            if stdout:
                stdout = StringIO()
                sys.stdout = stdout
            if stderr:
                stderr = StringIO()
                sys.stderr = stderr
            ret = validate_command(sigdict, args)
        finally:
            # BUG FIX: the redirected streams were previously never
            # restored, silently swallowing all later test output.
            sys.stdout, sys.stderr = saved_stdout, saved_stderr
        if stdout:
            stdout = stdout.getvalue().strip()
        if stderr:
            stderr = stderr.getvalue().strip()
        return ret, stdout, stderr


class TestBasic(unittest.TestCase):

    def test_non_ascii_in_non_options(self):
        # ArgumentPrefix("no match for {0}".format(s)) is not able to convert
        # unicode str parameter into str. and validate_command() should not
        # choke on it.
        self.assertEqual({}, validate_command(sigdict, [u'章鱼和鱿鱼']))
        self.assertEqual({}, validate_command(sigdict, [u'–w']))
        # actually we always pass unicode strings to validate_command() in "ceph"
        # CLI, but we also use bytestrings in our tests, so make sure it does not
        # break.
        self.assertEqual({}, validate_command(sigdict, ['章鱼和鱿鱼']))
        self.assertEqual({}, validate_command(sigdict, ['–w']))
class TestPG(TestArgparse):
    """Validation tests for the ``pg ...`` command family."""

    def test_stat(self):
        self._assert_valid_command(['pg', 'stat'])

    def test_getmap(self):
        self._assert_valid_command(['pg', 'getmap'])

    def test_dump(self):
        valid_commands = {
            'pg dump': {'prefix': 'pg dump'},
            'pg dump all summary sum delta pools osds pgs pgs_brief':
            {'prefix': 'pg dump',
             'dumpcontents':
             'all summary sum delta pools osds pgs pgs_brief'.split()
             },
            'pg dump --dumpcontents summary,sum':
            {'prefix': 'pg dump',
             'dumpcontents': 'summary,sum'.split(',')
             }
        }
        for command, expected_result in valid_commands.items():
            actual_result = validate_command(sigdict, command.split())
            # "pg dump" is routed to the mgr via the mon.
            expected_result['target'] = ('mon-mgr', '')
            self.assertEqual(expected_result, actual_result)
        invalid_commands = ['pg dump invalid']
        for command in invalid_commands:
            actual_result = validate_command(sigdict, command.split())
            self.assertEqual({}, actual_result)

    def test_dump_json(self):
        self._assert_valid_command(['pg', 'dump_json'])
        self._assert_valid_command(['pg', 'dump_json',
                                    'all',
                                    'summary',
                                    'sum',
                                    'pools',
                                    'osds',
                                    'pgs'])
        self.assertEqual({}, validate_command(sigdict, ['pg', 'dump_json',
                                                        'invalid']))

    def test_dump_pools_json(self):
        self._assert_valid_command(['pg', 'dump_pools_json'])

    def test_dump_pools_stuck(self):
        self._assert_valid_command(['pg', 'dump_stuck'])
        self._assert_valid_command(['pg', 'dump_stuck',
                                    'inactive',
                                    'unclean',
                                    'stale'])
        self.assertEqual({}, validate_command(sigdict, ['pg', 'dump_stuck',
                                                        'invalid']))
        self._assert_valid_command(['pg', 'dump_stuck',
                                    'inactive',
                                    '1234'])

    def one_pgid(self, command):
        # Helper: ``pg <command>`` requires exactly one pgid ("N.N").
        self._assert_valid_command(['pg', command, '1.1'])
        self.assertEqual({}, validate_command(sigdict, ['pg', command]))
        self.assertEqual({}, validate_command(sigdict, ['pg', command, '1']))

    def test_map(self):
        self.one_pgid('map')

    def test_scrub(self):
        self.one_pgid('scrub')

    def test_deep_scrub(self):
        self.one_pgid('deep-scrub')

    def test_repair(self):
        self.one_pgid('repair')

    def test_debug(self):
        self._assert_valid_command(['pg',
                                    'debug',
                                    'unfound_objects_exist'])
        self._assert_valid_command(['pg',
                                    'debug',
                                    'degraded_pgs_exist'])
        self.assertEqual({}, validate_command(sigdict, ['pg', 'debug']))
        self.assertEqual({}, validate_command(sigdict, ['pg', 'debug',
                                                        'invalid']))

    def test_pg_missing_args_output(self):
        ret, _, stderr = self._capture_output(['pg'], stderr=True)
        self.assertEqual({}, ret)
        # BUG FIX: assertRegexpMatches is a deprecated alias that was
        # removed in Python 3.12; assertRegex is the supported spelling.
        self.assertRegex(stderr, re.compile('no valid command found.* closest matches'))

    def test_pg_wrong_arg_output(self):
        ret, _, stderr = self._capture_output(['pg', 'map', 'bad-pgid'],
                                              stderr=True)
        self.assertEqual({}, ret)
        self.assertIn("Invalid command", stderr)


class TestAuth(TestArgparse):
    """Validation tests for the ``auth ...`` command family."""

    def test_export(self):
        self._assert_valid_command(['auth', 'export'])
        self._assert_valid_command(['auth', 'export', 'string'])
        self.assertEqual({}, validate_command(sigdict, ['auth',
                                                        'export',
                                                        'string',
                                                        'toomany']))

    def test_get(self):
        self.check_1_string_arg('auth', 'get')

    def test_get_key(self):
        self.check_1_string_arg('auth', 'get-key')

    def test_print_key(self):
        self.check_1_string_arg('auth', 'print-key')
        self.check_1_string_arg('auth', 'print_key')

    def test_list(self):
        self.check_no_arg('auth', 'list')

    def test_import(self):
        self.check_no_arg('auth', 'import')

    def test_add(self):
        self.check_1_or_more_string_args('auth', 'add')

    def test_get_or_create_key(self):
        self.check_1_or_more_string_args('auth', 'get-or-create-key')
        prefix = 'auth get-or-create-key'
        entity = 'client.test'
        caps = ['mon',
                'allow r',
                'osd',
                'allow rw pool=nfs-ganesha namespace=test, allow rw tag cephfs data=user_test_fs',
                'mds',
                'allow rw path=/']
        cmd = prefix.split() + [entity] + caps
        self.assertEqual(
            {
                'prefix': prefix,
                'entity': entity,
                'caps': caps
            }, validate_command(sigdict, cmd))

    def test_get_or_create(self):
        self.check_1_or_more_string_args('auth', 'get-or-create')

    def test_caps(self):
        self.assertEqual({}, validate_command(sigdict, ['auth',
                                                        'caps']))
        self.assertEqual({}, validate_command(sigdict, ['auth',
                                                        'caps',
                                                        'string']))
        self._assert_valid_command(['auth',
                                    'caps',
                                    'string',
                                    'more string'])

    def test_del(self):
        self.check_1_string_arg('auth', 'del')
class TestMonitor(TestArgparse):
    """Validation tests for monitor-level (single-word) commands."""

    def test_compact(self):
        self._assert_valid_command(['compact'])

    def test_fsid(self):
        self._assert_valid_command(['fsid'])

    def test_log(self):
        # At least one logtext word is required.
        self.assertEqual({}, validate_command(sigdict, ['log']))
        for words in (['a logtext'], ['a logtext', 'and another']):
            self._assert_valid_command(['log'] + words)

    def test_injectargs(self):
        # At least one injected argument is required.
        self.assertEqual({}, validate_command(sigdict, ['injectargs']))
        for words in (['one'], ['one', 'two']):
            self._assert_valid_command(['injectargs'] + words)

    def test_status(self):
        self._assert_valid_command(['status'])

    def test_health(self):
        self._assert_valid_command(['health'])
        self._assert_valid_command(['health', 'detail'])
        for bad in (['health', 'invalid'],
                    ['health', 'detail', 'toomany']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_df(self):
        self._assert_valid_command(['df'])
        self._assert_valid_command(['df', 'detail'])
        for bad in (['df', 'invalid'],
                    ['df', 'detail', 'toomany']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_report(self):
        # Any number of report tags (including none) is accepted.
        for tags in ([], ['tag1'], ['tag1', 'tag2']):
            self._assert_valid_command(['report'] + tags)

    def test_quorum_status(self):
        self._assert_valid_command(['quorum_status'])

    def test_tell(self):
        self.assertEqual({}, validate_command(sigdict, ['tell']))
        self.assertEqual({}, validate_command(sigdict, ['tell', 'invalid']))
        for daemon in ('osd', 'mon', 'client', 'mds'):
            # A bare daemon type, or an id without a command, is rejected.
            self.assertEqual({}, validate_command(sigdict, ['tell', daemon]))
            self.assertEqual({}, validate_command(sigdict,
                                                  ['tell', daemon + ".42"]))
            self._assert_valid_command(['tell', daemon + ".42", 'something'])
            self._assert_valid_command(['tell', daemon + ".42",
                                        'something',
                                        'something else'])


class TestMDS(TestArgparse):
    """Validation tests for the ``mds ...`` command family."""

    def test_stat(self):
        self.check_no_arg('mds', 'stat')

    def test_compat_show(self):
        self._assert_valid_command(['mds', 'compat', 'show'])
        for bad in (['mds', 'compat'],
                    ['mds', 'compat', 'show', 'toomany']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_set_state(self):
        self._assert_valid_command(['mds', 'set_state', '1', '2'])
        for bad in (['mds', 'set_state'],
                    ['mds', 'set_state', '-1'],
                    ['mds', 'set_state', '1', '-1'],
                    ['mds', 'set_state', '1', '21']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_fail(self):
        self.check_1_string_arg('mds', 'fail')

    def test_rm(self):
        # Valid: single GID argument present
        self._assert_valid_command(['mds', 'rm', '1'])

        # Missing GID arg: invalid
        self.assertEqual({}, validate_command(sigdict, ['mds', 'rm']))
        # Extra arg: invalid
        self.assertEqual({}, validate_command(sigdict,
                                              ['mds', 'rm', '1', 'mds.42']))

    def test_rmfailed(self):
        self._assert_valid_command(['mds', 'rmfailed', '0'])
        self._assert_valid_command(['mds', 'rmfailed', '0',
                                    '--yes-i-really-mean-it'])
        self.assertEqual({}, validate_command(sigdict,
                                              ['mds', 'rmfailed', '0',
                                               '--yes-i-really-mean-it',
                                               'toomany']))

    def _check_compat_removal(self, subcommand):
        # Shared body for rm_compat / rm_incompat: exactly one natural number.
        self._assert_valid_command(['mds', 'compat', subcommand, '1'])
        for bad in (['mds', 'compat', subcommand],
                    ['mds', 'compat', subcommand, '-1'],
                    ['mds', 'compat', subcommand, '1', '1']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_compat_rm_compat(self):
        self._check_compat_removal('rm_compat')

    def test_incompat_rm_incompat(self):
        self._check_compat_removal('rm_incompat')
class TestFS(TestArgparse):
    """Validation tests for the ``fs ...`` command family."""

    def test_dump(self):
        self.check_0_or_1_natural_arg('fs', 'dump')

    def test_fs_new(self):
        self._assert_valid_command(['fs', 'new', 'default', 'metadata', 'data'])

    def test_fs_set_max_mds(self):
        for count in ('1', '2'):
            self._assert_valid_command(['fs', 'set', 'default',
                                        'max_mds', count])

    def test_fs_set_cluster_down(self):
        self._assert_valid_command(['fs', 'set', 'default', 'down', 'true'])

    def test_fs_set_cluster_up(self):
        self._assert_valid_command(['fs', 'set', 'default', 'down', 'false'])

    def test_fs_set_cluster_joinable(self):
        self._assert_valid_command(['fs', 'set', 'default', 'joinable', 'true'])

    def test_fs_set_cluster_not_joinable(self):
        self._assert_valid_command(['fs', 'set', 'default', 'joinable', 'false'])

    def test_fs_set(self):
        self._assert_valid_command(['fs', 'set', 'default',
                                    'max_file_size', '2'])
        self._assert_valid_command(['fs', 'set', 'default',
                                    'allow_new_snaps', 'no'])
        self.assertEqual({}, validate_command(sigdict, ['fs',
                                                        'set',
                                                        'invalid']))

    def test_fs_add_data_pool(self):
        # Pool may be given by id or by name.
        for pool in ('1', 'foo'):
            self._assert_valid_command(['fs', 'add_data_pool', 'default', pool])

    def test_fs_remove_data_pool(self):
        for pool in ('1', 'foo'):
            self._assert_valid_command(['fs', 'rm_data_pool', 'default', pool])

    def test_fs_rm(self):
        self._assert_valid_command(['fs', 'rm', 'default'])
        self._assert_valid_command(['fs', 'rm', 'default',
                                    '--yes-i-really-mean-it'])
        self.assertEqual({}, validate_command(sigdict,
                                              ['fs', 'rm', 'default',
                                               '--yes-i-really-mean-it',
                                               'toomany']))

    def test_fs_ls(self):
        self._assert_valid_command(['fs', 'ls'])
        self.assertEqual({}, validate_command(sigdict, ['fs', 'ls', 'toomany']))

    def test_fs_set_default(self):
        self._assert_valid_command(['fs', 'set-default', 'cephfs'])
        for bad in (['fs', 'set-default'],
                    ['fs', 'set-default', 'cephfs', 'toomany']):
            self.assertEqual({}, validate_command(sigdict, bad))


class TestMon(TestArgparse):
    """Validation tests for the ``mon ...`` command family."""

    def test_dump(self):
        self.check_0_or_1_natural_arg('mon', 'dump')

    def test_stat(self):
        self.check_no_arg('mon', 'stat')

    def test_getmap(self):
        self.check_0_or_1_natural_arg('mon', 'getmap')

    def test_add(self):
        self._assert_valid_command(['mon', 'add', 'name', '1.2.3.4:1234'])
        for bad in (['mon', 'add'],
                    ['mon', 'add', 'name'],
                    ['mon', 'add', 'name', '400.500.600.700']):
            self.assertEqual({}, validate_command(sigdict, bad))

    def test_remove(self):
        self._assert_valid_command(['mon', 'remove', 'name'])
        for bad in (['mon', 'remove'],
                    ['mon', 'remove', 'name', 'toomany']):
            self.assertEqual({}, validate_command(sigdict, bad))
+ { + 'prefix': 'osd tree', + 'states': ['down', 'out'] + }, validate_command(sigdict, cmd.split())) + + def test_osd_ls(self): + self.check_0_or_1_natural_arg('osd', 'ls') + + def test_osd_getmap(self): + self.check_0_or_1_natural_arg('osd', 'getmap') + + def test_osd_getcrushmap(self): + self.check_0_or_1_natural_arg('osd', 'getcrushmap') + + def test_perf(self): + self.check_no_arg('osd', 'perf') + + def test_getmaxosd(self): + self.check_no_arg('osd', 'getmaxosd') + + def test_find(self): + self.check_1_natural_arg('osd', 'find') + + def test_map(self): + self._assert_valid_command(['osd', 'map', 'poolname', 'objectname']) + self._assert_valid_command(['osd', 'map', 'poolname', 'objectname', 'nspace']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'map'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'map', 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'map', + 'poolname', 'objectname', 'nspace', + 'toomany'])) + + def test_metadata(self): + self.check_0_or_1_natural_arg('osd', 'metadata') + + def test_scrub(self): + self.check_1_string_arg('osd', 'scrub') + + def test_deep_scrub(self): + self.check_1_string_arg('osd', 'deep-scrub') + + def test_repair(self): + self.check_1_string_arg('osd', 'repair') + + def test_lspools(self): + self._assert_valid_command(['osd', 'lspools']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'lspools', + 'toomany'])) + + def test_blocklist_ls(self): + self._assert_valid_command(['osd', 'blocklist', 'ls']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', + 'ls', 'toomany'])) + + def test_crush_rule(self): + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule'])) + for subcommand in ('list', 'ls'): + self._assert_valid_command(['osd', 'crush', 'rule', subcommand]) + self.assertEqual({}, 
validate_command(sigdict, ['osd', 'crush', + 'rule', subcommand, + 'toomany'])) + + def test_crush_rule_dump(self): + self._assert_valid_command(['osd', 'crush', 'rule', 'dump']) + self._assert_valid_command(['osd', 'crush', 'rule', 'dump', 'RULE']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'dump', + 'RULE', + 'toomany'])) + + def test_crush_dump(self): + self._assert_valid_command(['osd', 'crush', 'dump']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'dump', + 'toomany'])) + + def test_setcrushmap(self): + self.check_no_arg('osd', 'setcrushmap') + + def test_crush_add_bucket(self): + self._assert_valid_command(['osd', 'crush', 'add-bucket', + 'name', 'type']) + self._assert_valid_command(['osd', 'crush', 'add-bucket', + 'name', 'type', 'root=foo-root', 'host=foo-host']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'add-bucket'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'add-bucket', '^^^', + 'type'])) + + def test_crush_rename_bucket(self): + self._assert_valid_command(['osd', 'crush', 'rename-bucket', + 'srcname', 'dstname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', + 'srcname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', + 'srcname', + 'dstname', + 'toomany'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', '^^^', + 'dstname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rename-bucket', + 'srcname', + '^^^^'])) + + def _check_crush_setter(self, setter): + self._assert_valid_command(['osd', 'crush', setter, + '*', '2.3', 
'AZaz09-_.=']) + self._assert_valid_command(['osd', 'crush', setter, + 'osd.0', '2.3', 'AZaz09-_.=']) + self._assert_valid_command(['osd', 'crush', setter, + '0', '2.3', 'AZaz09-_.=']) + self._assert_valid_command(['osd', 'crush', setter, + '0', '2.3', 'AZaz09-_.=', 'AZaz09-_.=']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0'])) + ret = validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0', + '-1.0']) + assert ret in [None, {}] + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + setter, + 'osd.0', + '1.0', + '^^^'])) + + def test_crush_set(self): + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self._check_crush_setter('set') + + def test_crush_add(self): + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self._check_crush_setter('add') + + def test_crush_create_or_move(self): + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush'])) + self._check_crush_setter('create-or-move') + + def test_crush_move(self): + self._assert_valid_command(['osd', 'crush', 'move', + 'AZaz09-_.', 'AZaz09-_.=']) + self._assert_valid_command(['osd', 'crush', 'move', + '0', 'AZaz09-_.=', 'AZaz09-_.=']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'move'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'move', 'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'move', '^^^', + 'AZaz09-_.='])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'move', 'AZaz09-_.', + '^^^'])) + + def test_crush_link(self): + self._assert_valid_command(['osd', 'crush', 'link', + 'name', 'AZaz09-_.=']) + self._assert_valid_command(['osd', 'crush', 'link', + 'name', 'AZaz09-_.=', 'AZaz09-_.=']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'link'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'link', + 'name'])) + + def test_crush_rm(self): + for alias in 
('rm', 'remove', 'unlink'): + self._assert_valid_command(['osd', 'crush', alias, 'AZaz09-_.']) + self._assert_valid_command(['osd', 'crush', alias, + 'AZaz09-_.', 'AZaz09-_.']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + alias])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + alias, + 'AZaz09-_.', + 'AZaz09-_.', + 'toomany'])) + + def test_crush_reweight(self): + self._assert_valid_command(['osd', 'crush', 'reweight', + 'AZaz09-_.', '2.3']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'reweight'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + 'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + 'AZaz09-_.', + '-1.0'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'reweight', + '^^^', + '2.3'])) + + def test_crush_tunables(self): + for tunable in ('legacy', 'argonaut', 'bobtail', 'firefly', + 'optimal', 'default'): + self._assert_valid_command(['osd', 'crush', 'tunables', + tunable]) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'tunables'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'tunables', + 'default', 'toomany'])) + + def test_crush_rule_create_simple(self): + self._assert_valid_command(['osd', 'crush', 'rule', 'create-simple', + 'AZaz09-_.', 'AZaz09-_.', 'AZaz09-_.']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + 'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + '^^^', + 'AZaz09-_.', + 'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + '|||', + 
'AZaz09-_.'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + 'AZaz09-_.', + '+++'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-simple', + 'AZaz09-_.', + 'AZaz09-_.', + 'AZaz09-_.', + 'toomany'])) + + def test_crush_rule_create_erasure(self): + self._assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', + 'AZaz09-_.']) + self._assert_valid_command(['osd', 'crush', 'rule', 'create-erasure', + 'AZaz09-_.', 'whatever']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure', + '^^^'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', 'rule', + 'create-erasure', + 'name', '^^^'])) + + def test_crush_rule_rm(self): + self._assert_valid_command(['osd', 'crush', 'rule', 'rm', 'AZaz09-_.']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm', + '^^^^'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'crush', + 'rule', 'rm', + 'AZaz09-_.', + 'toomany'])) + + def test_setmaxosd(self): + self.check_1_natural_arg('osd', 'setmaxosd') + + def test_pause(self): + self.check_no_arg('osd', 'pause') + + def test_unpause(self): + self.check_no_arg('osd', 'unpause') + + def test_erasure_code_profile_set(self): + self._assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name']) + self._assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name', 'A=B']) + self._assert_valid_command(['osd', 'erasure-code-profile', 'set', + 'name', 'A=B', 'C=D']) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'set'])) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'set', + '^^^^'])) + + def test_erasure_code_profile_get(self): 
+ self._assert_valid_command(['osd', 'erasure-code-profile', 'get', + 'name']) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'get'])) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'get', + '^^^^'])) + + def test_erasure_code_profile_rm(self): + self._assert_valid_command(['osd', 'erasure-code-profile', 'rm', + 'name']) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'rm'])) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'rm', + '^^^^'])) + + def test_erasure_code_profile_ls(self): + self._assert_valid_command(['osd', 'erasure-code-profile', 'ls']) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'erasure-code-profile', + 'ls', + 'toomany'])) + + def test_set_unset(self): + for action in ('set', 'unset'): + for flag in ('pause', 'noup', 'nodown', 'noout', 'noin', + 'nobackfill', 'norecover', 'noscrub', 'nodeep-scrub'): + self._assert_valid_command(['osd', action, flag]) + self.assertEqual({}, validate_command(sigdict, ['osd', action])) + self.assertEqual({}, validate_command(sigdict, ['osd', action, + 'invalid'])) + self.assertEqual({}, validate_command(sigdict, ['osd', action, + 'pause', + 'toomany'])) + + def test_down(self): + self.check_1_or_more_string_args('osd', 'down') + + def test_out(self): + self.check_1_or_more_string_args('osd', 'out') + + def test_in(self): + self.check_1_or_more_string_args('osd', 'in') + + def test_rm(self): + self.check_1_or_more_string_args('osd', 'rm') + + def test_reweight(self): + self._assert_valid_command(['osd', 'reweight', '1', '0.1']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', + '1'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', + '1', '2.0'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', + '-1', '0.1'])) + 
self.assertEqual({}, validate_command(sigdict, ['osd', 'reweight', + '1', '0.1', + 'toomany'])) + + def test_lost(self): + self._assert_valid_command(['osd', 'lost', '1', + '--yes-i-really-mean-it']) + self._assert_valid_command(['osd', 'lost', '1']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'lost'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', + '1', + 'what?'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', + '-1', + '--yes-i-really-mean-it'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'lost', + '1', + '--yes-i-really-mean-it', + 'toomany'])) + + def test_create(self): + uuid = '12345678123456781234567812345678' + self._assert_valid_command(['osd', 'create']) + self._assert_valid_command(['osd', 'create', uuid]) + self.assertEqual({}, validate_command(sigdict, ['osd', 'create', + 'invalid'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'create', + uuid, + 'toomany'])) + + def test_blocklist(self): + for action in ('add', 'rm'): + self._assert_valid_command(['osd', 'blocklist', action, + '1.2.3.4/567']) + self._assert_valid_command(['osd', 'blocklist', action, + '1.2.3.4']) + self._assert_valid_command(['osd', 'blocklist', action, + '1.2.3.4/567', '600.40']) + self._assert_valid_command(['osd', 'blocklist', action, + '1.2.3.4', '600.40']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', + action, + 'invalid', + '600.40'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', + action, + '1.2.3.4/567', + '-1.0'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'blocklist', + action, + '1.2.3.4/567', + '600.40', + 'toomany'])) + + def test_pool_mksnap(self): + self._assert_valid_command(['osd', 'pool', 'mksnap', + 'poolname', 'snapname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'mksnap'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'mksnap', + 'poolname'])) + self.assertEqual({}, 
validate_command(sigdict, ['osd', 'pool', 'mksnap', + 'poolname', 'snapname', + 'toomany'])) + + def test_pool_rmsnap(self): + self._assert_valid_command(['osd', 'pool', 'rmsnap', + 'poolname', 'snapname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', + 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rmsnap', + 'poolname', 'snapname', + 'toomany'])) + + def test_pool_kwargs(self): + """ + Use the pool creation command to exercise keyword-style arguments + since it has lots of parameters + """ + # Simply use a keyword arg instead of a positional arg, in its + # normal order (pgp_num after pg_num) + self.assertEqual( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16 + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", "--pgp_num", "16"])) + + # Again, but using the "--foo=bar" style + self.assertEqual( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16 + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", "--pgp_num=16"])) + + # Specify keyword args in a different order than their definitions + # (pgp_num after pool_type) + self.assertEqual( + { + "prefix": "osd pool create", + "pool": "foo", + "pg_num": 8, + "pgp_num": 16, + "pool_type": "replicated" + }, validate_command(sigdict, [ + 'osd', 'pool', 'create', "foo", "8", + "--pool_type", "replicated", + "--pgp_num", "16"])) + + # Use a keyword argument that doesn't exist, should fail validation + self.assertEqual({}, validate_command(sigdict, + ['osd', 'pool', 'create', "foo", "8", "--foo=bar"])) + + def test_foo(self): + # Long form of a boolean argument (--foo=true) + self.assertEqual( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + 
"--yes-i-really-really-mean-it=true"])) + + def test_pool_bool_args(self): + """ + Use pool deletion to exercise boolean arguments since it has + the --yes-i-really-really-mean-it flags + """ + + # Short form of a boolean argument (--foo) + self.assertEqual( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it"])) + + # Long form of a boolean argument (--foo=true) + self.assertEqual( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": True + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=true"])) + + # Negative form of a boolean argument (--foo=false) + self.assertEqual( + { + "prefix": "osd pool delete", + "pool": "foo", + "pool2": "foo", + "yes_i_really_really_mean_it": False + }, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=false"])) + + # Invalid value boolean argument (--foo=somethingelse) + self.assertEqual({}, validate_command(sigdict, [ + 'osd', 'pool', 'delete', "foo", "foo", + "--yes-i-really-really-mean-it=rhubarb"])) + + def test_pool_create(self): + self._assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128']) + self._assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128']) + self._assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128', + 'replicated']) + self._assert_valid_command(['osd', 'pool', 'create', + 'poolname', '128', '128', + 'erasure', 'A-Za-z0-9-_.', 'ruleset^^']) + self._assert_valid_command(['osd', 'pool', 'create', 'poolname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create'])) + # invalid pg_num and pgp_num, like "-1", could spill over to + # erasure_code_profile and rule as they are valid profile and rule + # names, so 
validate_commands() cannot identify such cases. + # but if they are matched by profile and rule, the "rule" argument + # won't get a chance to be matched anymore. + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '-1', '-1', + 'ruleset'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', '128', + 'erasure', '^^^', + 'ruleset'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', '128', + 'erasure', 'profile', + 'ruleset', + 'toomany'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'create', + 'poolname', + '128', '128', + 'INVALID', 'profile', + 'ruleset'])) + + def test_pool_delete(self): + self._assert_valid_command(['osd', 'pool', 'delete', + 'poolname', 'poolname', + '--yes-i-really-really-mean-it']) + self._assert_valid_command(['osd', 'pool', 'delete', + 'poolname', 'poolname']) + self._assert_valid_command(['osd', 'pool', 'delete', + 'poolname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'delete'])) + self.assertEqual({}, validate_command(sigdict, + ['osd', 'pool', 'delete', + 'poolname', 'poolname', + '--yes-i-really-really-mean-it', + 'toomany'])) + + def test_pool_rename(self): + self._assert_valid_command(['osd', 'pool', 'rename', + 'poolname', 'othername']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename', + 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', 'rename', + 'poolname', 'othername', + 'toomany'])) + + def test_pool_get(self): + for var in ('size', 'min_size', + 'pg_num', 'pgp_num', 'crush_rule', 'fast_read', + 'scrub_min_interval', 'scrub_max_interval', + 'deep_scrub_interval', 'recovery_priority', + 'recovery_op_priority'): + self._assert_valid_command(['osd', 'pool', 'get', 'poolname', var]) + self.assertEqual({}, 
validate_command(sigdict, ['osd', 'pool'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'get'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname', + 'size', 'toomany'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'get', 'poolname', + 'invalid'])) + + def test_pool_set(self): + for var in ('size', 'min_size', + 'pg_num', 'pgp_num', 'crush_rule', + 'hashpspool', 'fast_read', + 'scrub_min_interval', 'scrub_max_interval', + 'deep_scrub_interval', 'recovery_priority', + 'recovery_op_priority'): + self._assert_valid_command(['osd', 'pool', + 'set', 'poolname', var, 'value']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set', 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set', 'poolname', + 'size', 'value', + 'toomany'])) + + def test_pool_set_quota(self): + for field in ('max_objects', 'max_bytes'): + self._assert_valid_command(['osd', 'pool', 'set-quota', + 'poolname', field, '10K']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'max_objects'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'invalid', + '10K'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'pool', + 'set-quota', + 'poolname', + 'max_objects', + '10K', + 'toomany'])) + + def test_reweight_by_utilization(self): + self._assert_valid_command(['osd', 'reweight-by-utilization']) + self._assert_valid_command(['osd', 'reweight-by-utilization', '100']) + self._assert_valid_command(['osd', 'reweight-by-utilization', '100', 
'.1']) + self.assertEqual({}, validate_command(sigdict, ['osd', + 'reweight-by-utilization', + '100', + 'toomany'])) + + def test_tier_op(self): + for op in ('add', 'remove', 'set-overlay'): + self._assert_valid_command(['osd', 'tier', op, + 'poolname', 'othername']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', op])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', op, + 'poolname'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', op, + 'poolname', + 'othername', + 'toomany'])) + + def test_tier_cache_mode(self): + for mode in ('none', 'writeback', 'readonly', 'readproxy'): + self._assert_valid_command(['osd', 'tier', 'cache-mode', + 'poolname', mode]) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', + 'cache-mode'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', + 'cache-mode', + 'invalid'])) + + def test_tier_remove_overlay(self): + self._assert_valid_command(['osd', 'tier', 'remove-overlay', + 'poolname']) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', + 'remove-overlay'])) + self.assertEqual({}, validate_command(sigdict, ['osd', 'tier', + 'remove-overlay', + 'poolname', + 'toomany'])) + + def _set_ratio(self, command): + self._assert_valid_command(['osd', command, '0.0']) + self.assertEqual({}, validate_command(sigdict, ['osd', command])) + self.assertEqual({}, validate_command(sigdict, ['osd', command, '2.0'])) + + def test_set_full_ratio(self): + self._set_ratio('set-full-ratio') + + def test_set_backfillfull_ratio(self): + self._set_ratio('set-backfillfull-ratio') + + def test_set_nearfull_ratio(self): + self._set_ratio('set-nearfull-ratio') + + +class TestConfigKey(TestArgparse): + + def test_get(self): + self.check_1_string_arg('config-key', 'get') + + def test_put(self): + self._assert_valid_command(['config-key', 'put', + 'key']) + self._assert_valid_command(['config-key', 'put', + 'key', 'value']) + self.assertEqual({}, validate_command(sigdict, 
['config-key', 'put'])) + self.assertEqual({}, validate_command(sigdict, ['config-key', 'put', + 'key', 'value', + 'toomany'])) + + def test_del(self): + self.check_1_string_arg('config-key', 'del') + + def test_exists(self): + self.check_1_string_arg('config-key', 'exists') + + def test_dump(self): + self.check_0_or_1_string_arg('config-key', 'dump') + + def test_list(self): + self.check_no_arg('config-key', 'list') + + +class TestValidate(unittest.TestCase): + + ARGS = 0 + KWARGS = 1 + KWARGS_EQ = 2 + MIXED = 3 + + def setUp(self): + self.prefix = ['some', 'random', 'cmd'] + self.args_dict = [ + {'name': 'variable_one', 'type': 'CephString'}, + {'name': 'variable_two', 'type': 'CephString'}, + {'name': 'variable_three', 'type': 'CephString'}, + {'name': 'variable_four', 'type': 'CephInt'}, + {'name': 'variable_five', 'type': 'CephString'}] + self.args = [] + for d in self.args_dict: + if d['type'] == 'CephInt': + val = "{}".format(random.randint(0, 100)) + elif d['type'] == 'CephString': + letters = string.ascii_letters + str_len = random.randint(5, 10) + val = ''.join(random.choice(letters) for _ in range(str_len)) + else: + raise skipTest() + + self.args.append((d['name'], val)) + + self.sig = parse_funcsig(self.prefix + self.args_dict) + + def _arg_kwarg_test(self, prefix, args, sig, arg_type=0): + """ + Runs validate in different arg/kargs ways. + + :param prefix: List of prefix commands (that can't be kwarged) + :param args: a list of kwarg, arg pairs: [(k1, v1), (k2, v2), ...] + :param sig: The sig to match + :param arg_type: how to build the args to send. As positional args (ARGS), + as long kwargs (KWARGS [--k v]), other style long kwargs + (KWARGS_EQ (--k=v]), and mixed (MIXED) where there will be + a random mix of the above. + :return: None, the method will assert. 
+ """ + final_args = list(prefix) + for k, v in args: + a_type = arg_type + if a_type == self.MIXED: + a_type = random.choice((self.ARGS, + self.KWARGS, + self.KWARGS_EQ)) + if a_type == self.ARGS: + final_args.append(v) + elif a_type == self.KWARGS: + final_args.extend(["--{}".format(k), v]) + else: + final_args.append("--{}={}".format(k, v)) + + try: + validate(final_args, sig) + except (ArgumentError, ArgumentMissing, + ArgumentNumber, ArgumentTooFew, ArgumentValid) as ex: + self.fail("Validation failed: {}".format(str(ex))) + + def test_args_and_kwargs_validate(self): + for arg_type in (self.ARGS, self.KWARGS, self.KWARGS_EQ, self.MIXED): + self._arg_kwarg_test(self.prefix, self.args, self.sig, arg_type) + + +if __name__ == '__main__': + unittest.main() + + +# Local Variables: +# compile-command: "cd ../../..; cmake --build build --target get_command_descriptions -j4 && +# CEPH_BIN=build/bin \ +# PYTHONPATH=src/pybind python3 \ +# src/test/pybind/test_ceph_argparse.py" +# End: diff --git a/src/test/pybind/test_ceph_daemon.py b/src/test/pybind/test_ceph_daemon.py new file mode 100755 index 000000000..df8d4c0b0 --- /dev/null +++ b/src/test/pybind/test_ceph_daemon.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +# -*- mode:python; tab-width:4; indent-tabs-mode:t -*- +# vim: ts=4 sw=4 smarttab expandtab +# +""" +Copyright (C) 2015 Red Hat + +This is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public +License version 2, as published by the Free Software +Foundation. See file COPYING. 
+""" + +import unittest + +from ceph_daemon import DaemonWatcher + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + + +class TestDaemonWatcher(unittest.TestCase): + def test_format(self): + dw = DaemonWatcher(None) + + self.assertEqual(dw.format_dimless(1, 4), " 1 ") + self.assertEqual(dw.format_dimless(1000, 4), "1.0k") + self.assertEqual(dw.format_dimless(3.14159, 4), " 3 ") + self.assertEqual(dw.format_dimless(1400000, 4), "1.4M") + + def test_col_width(self): + dw = DaemonWatcher(None) + + self.assertEqual(dw.col_width("foo"), 4) + self.assertEqual(dw.col_width("foobar"), 6) + + def test_supports_color(self): + dw = DaemonWatcher(None) + # Can't count on having a tty available during tests, so only test the false case + self.assertFalse(dw.supports_color(StringIO())) + + +if __name__ == '__main__': + unittest.main() + + +# Local Variables: +# compile-command: "cd ../../..; +# PYTHONPATH=src/pybind python3 src/test/pybind/test_ceph_daemon.py" +# End: diff --git a/src/test/pybind/test_cephfs.py b/src/test/pybind/test_cephfs.py new file mode 100644 index 000000000..83c98ad0b --- /dev/null +++ b/src/test/pybind/test_cephfs.py @@ -0,0 +1,906 @@ +# vim: expandtab smarttab shiftwidth=4 softtabstop=4 +from nose.tools import assert_raises, assert_equal, assert_not_equal, assert_greater, with_setup +import cephfs as libcephfs +import fcntl +import os +import random +import time +import stat +import uuid +from datetime import datetime + +cephfs = None + +def setup_module(): + global cephfs + cephfs = libcephfs.LibCephFS(conffile='') + cephfs.mount() + +def teardown_module(): + global cephfs + cephfs.shutdown() + +def setup_test(): + d = cephfs.opendir(b"/") + dent = cephfs.readdir(d) + while dent: + if (dent.d_name not in [b".", b".."]): + if dent.is_dir(): + cephfs.rmdir(b"/" + dent.d_name) + else: + cephfs.unlink(b"/" + dent.d_name) + + dent = cephfs.readdir(d) + + cephfs.closedir(d) + + cephfs.chdir(b"/") + _, ret_buf = 
cephfs.listxattr("/") + print(f'ret_buf={ret_buf}') + xattrs = ret_buf.decode('utf-8').split('\x00') + for xattr in xattrs[:-1]: + cephfs.removexattr("/", xattr) + +@with_setup(setup_test) +def test_conf_get(): + fsid = cephfs.conf_get("fsid") + assert(len(fsid) > 0) + +@with_setup(setup_test) +def test_version(): + cephfs.version() + +@with_setup(setup_test) +def test_fstat(): + fd = cephfs.open(b'file-1', 'w', 0o755) + stat = cephfs.fstat(fd) + assert(len(stat) == 13) + cephfs.close(fd) + +@with_setup(setup_test) +def test_statfs(): + stat = cephfs.statfs(b'/') + assert(len(stat) == 11) + +@with_setup(setup_test) +def test_statx(): + stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_MODE, 0) + assert('mode' in stat.keys()) + stat = cephfs.statx(b'/', libcephfs.CEPH_STATX_BTIME, 0) + assert('btime' in stat.keys()) + + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'file-1', b'file-2') + stat = cephfs.statx(b'file-2', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_BTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + assert('mode' in stat.keys()) + assert('btime' in stat.keys()) + cephfs.unlink(b'file-2') + cephfs.unlink(b'file-1') + +@with_setup(setup_test) +def test_syncfs(): + stat = cephfs.sync_fs() + +@with_setup(setup_test) +def test_fsync(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"asdf", 0) + stat = cephfs.fsync(fd, 0) + cephfs.write(fd, b"qwer", 0) + stat = cephfs.fsync(fd, 1) + cephfs.close(fd) + #sync on non-existing fd (assume fd 12345 is not exists) + assert_raises(libcephfs.Error, cephfs.fsync, 12345, 0) + +@with_setup(setup_test) +def test_directory(): + cephfs.mkdir(b"/temp-directory", 0o755) + cephfs.mkdirs(b"/temp-directory/foo/bar", 0o755) + cephfs.chdir(b"/temp-directory") + assert_equal(cephfs.getcwd(), b"/temp-directory") + cephfs.rmdir(b"/temp-directory/foo/bar") + cephfs.rmdir(b"/temp-directory/foo") + cephfs.rmdir(b"/temp-directory") + 
assert_raises(libcephfs.ObjectNotFound, cephfs.chdir, b"/temp-directory") + +@with_setup(setup_test) +def test_walk_dir(): + cephfs.chdir(b"/") + dirs = [b"dir-1", b"dir-2", b"dir-3"] + for i in dirs: + cephfs.mkdir(i, 0o755) + handler = cephfs.opendir(b"/") + d = cephfs.readdir(handler) + dirs += [b".", b".."] + while d: + assert(d.d_name in dirs) + dirs.remove(d.d_name) + d = cephfs.readdir(handler) + assert(len(dirs) == 0) + dirs = [b"/dir-1", b"/dir-2", b"/dir-3"] + for i in dirs: + cephfs.rmdir(i) + cephfs.closedir(handler) + +@with_setup(setup_test) +def test_xattr(): + assert_raises(libcephfs.OperationNotSupported, cephfs.setxattr, "/", "key", b"value", 0) + cephfs.setxattr("/", "user.key", b"value", 0) + assert_equal(b"value", cephfs.getxattr("/", "user.key")) + + cephfs.setxattr("/", "user.big", b"x" * 300, 0) + + # Default size is 255, get ERANGE + assert_raises(libcephfs.OutOfRange, cephfs.getxattr, "/", "user.big") + + # Pass explicit size, and we'll get the value + assert_equal(300, len(cephfs.getxattr("/", "user.big", 300))) + + cephfs.removexattr("/", "user.key") + # user.key is already removed + assert_raises(libcephfs.NoData, cephfs.getxattr, "/", "user.key") + + # user.big is only listed + ret_val, ret_buff = cephfs.listxattr("/") + assert_equal(9, ret_val) + assert_equal("user.big\x00", ret_buff.decode('utf-8')) + +@with_setup(setup_test) +def test_ceph_mirror_xattr(): + def gen_mirror_xattr(): + cluster_id = str(uuid.uuid4()) + fs_id = random.randint(1, 10) + mirror_xattr = f'cluster_id={cluster_id} fs_id={fs_id}' + return mirror_xattr.encode('utf-8') + + mirror_xattr_enc_1 = gen_mirror_xattr() + + # mirror xattr is only allowed on root + cephfs.mkdir('/d0', 0o755) + assert_raises(libcephfs.InvalidValue, cephfs.setxattr, + '/d0', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) + cephfs.rmdir('/d0') + + cephfs.setxattr('/', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) + assert_equal(mirror_xattr_enc_1, cephfs.getxattr('/', 
'ceph.mirror.info')) + + # setting again with XATTR_CREATE should fail + assert_raises(libcephfs.ObjectExists, cephfs.setxattr, + '/', 'ceph.mirror.info', mirror_xattr_enc_1, os.XATTR_CREATE) + + # ceph.mirror.info should not show up in listing + ret_val, _ = cephfs.listxattr("/") + assert_equal(0, ret_val) + + mirror_xattr_enc_2 = gen_mirror_xattr() + + cephfs.setxattr('/', 'ceph.mirror.info', mirror_xattr_enc_2, os.XATTR_REPLACE) + assert_equal(mirror_xattr_enc_2, cephfs.getxattr('/', 'ceph.mirror.info')) + + cephfs.removexattr('/', 'ceph.mirror.info') + # ceph.mirror.info is already removed + assert_raises(libcephfs.NoData, cephfs.getxattr, '/', 'ceph.mirror.info') + # removing again should throw error + assert_raises(libcephfs.NoData, cephfs.removexattr, "/", "ceph.mirror.info") + + # check mirror info xattr format + assert_raises(libcephfs.InvalidValue, cephfs.setxattr, '/', 'ceph.mirror.info', b"unknown", 0) + +@with_setup(setup_test) +def test_fxattr(): + fd = cephfs.open(b'/file-fxattr', 'w', 0o755) + assert_raises(libcephfs.OperationNotSupported, cephfs.fsetxattr, fd, "key", b"value", 0) + assert_raises(TypeError, cephfs.fsetxattr, "fd", "user.key", b"value", 0) + assert_raises(TypeError, cephfs.fsetxattr, fd, "user.key", "value", 0) + assert_raises(TypeError, cephfs.fsetxattr, fd, "user.key", b"value", "0") + cephfs.fsetxattr(fd, "user.key", b"value", 0) + assert_equal(b"value", cephfs.fgetxattr(fd, "user.key")) + + cephfs.fsetxattr(fd, "user.big", b"x" * 300, 0) + + # Default size is 255, get ERANGE + assert_raises(libcephfs.OutOfRange, cephfs.fgetxattr, fd, "user.big") + + # Pass explicit size, and we'll get the value + assert_equal(300, len(cephfs.fgetxattr(fd, "user.big", 300))) + + cephfs.fremovexattr(fd, "user.key") + # user.key is already removed + assert_raises(libcephfs.NoData, cephfs.fgetxattr, fd, "user.key") + + # user.big is only listed + ret_val, ret_buff = cephfs.flistxattr(fd) + assert_equal(9, ret_val) + assert_equal("user.big\x00", 
ret_buff.decode('utf-8')) + cephfs.close(fd) + cephfs.unlink(b'/file-fxattr') + +@with_setup(setup_test) +def test_rename(): + cephfs.mkdir(b"/a", 0o755) + cephfs.mkdir(b"/a/b", 0o755) + cephfs.rename(b"/a", b"/b") + cephfs.stat(b"/b/b") + cephfs.rmdir(b"/b/b") + cephfs.rmdir(b"/b") + +@with_setup(setup_test) +def test_open(): + assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r') + assert_raises(libcephfs.ObjectNotFound, cephfs.open, b'file-1', 'r+') + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"asdf", 0) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"asdf") + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r+', 0o755) + cephfs.write(fd, b"zxcv", 4) + assert_equal(cephfs.read(fd, 4, 8), b"zxcv") + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'w+', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"") + cephfs.write(fd, b"zxcv", 4) + assert_equal(cephfs.read(fd, 4, 8), b"zxcv") + cephfs.close(fd) + fd = cephfs.open(b'file-1', os.O_RDWR, 0o755) + cephfs.write(fd, b"asdf", 0) + assert_equal(cephfs.read(fd, 0, 4), b"asdf") + cephfs.close(fd) + assert_raises(libcephfs.OperationNotSupported, cephfs.open, b'file-1', 'a') + cephfs.unlink(b'file-1') + +@with_setup(setup_test) +def test_link(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.link(b'file-1', b'file-2') + fd = cephfs.open(b'file-2', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 4), b"1111") + cephfs.close(fd) + fd = cephfs.open(b'file-2', 'r+', 0o755) + cephfs.write(fd, b"2222", 4) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 8), b"11112222") + cephfs.close(fd) + cephfs.unlink(b'file-2') + +@with_setup(setup_test) +def test_symlink(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'file-1', b'file-2') + fd = cephfs.open(b'file-2', 'r', 0o755) + 
assert_equal(cephfs.read(fd, 0, 4), b"1111") + cephfs.close(fd) + fd = cephfs.open(b'file-2', 'r+', 0o755) + cephfs.write(fd, b"2222", 4) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + assert_equal(cephfs.read(fd, 0, 8), b"11112222") + cephfs.close(fd) + cephfs.unlink(b'file-2') + +@with_setup(setup_test) +def test_readlink(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + cephfs.symlink(b'/file-1', b'/file-2') + d = cephfs.readlink(b"/file-2",100) + assert_equal(d, b"/file-1") + cephfs.unlink(b'/file-2') + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_delete_cwd(): + assert_equal(b"/", cephfs.getcwd()) + + cephfs.mkdir(b"/temp-directory", 0o755) + cephfs.chdir(b"/temp-directory") + cephfs.rmdir(b"/temp-directory") + + # getcwd gives you something stale here: it remembers the path string + # even when things are unlinked. It's up to the caller to find out + # whether it really still exists + assert_equal(b"/temp-directory", cephfs.getcwd()) + +@with_setup(setup_test) +def test_flock(): + fd = cephfs.open(b'file-1', 'w', 0o755) + + cephfs.flock(fd, fcntl.LOCK_EX, 123); + fd2 = cephfs.open(b'file-1', 'w', 0o755) + + assert_raises(libcephfs.WouldBlock, cephfs.flock, fd2, + fcntl.LOCK_EX | fcntl.LOCK_NB, 456); + cephfs.close(fd2) + + cephfs.close(fd) + +@with_setup(setup_test) +def test_mount_unmount(): + test_directory() + cephfs.unmount() + cephfs.mount() + test_open() + +@with_setup(setup_test) +def test_lxattr(): + fd = cephfs.open(b'/file-lxattr', 'w', 0o755) + cephfs.close(fd) + cephfs.setxattr(b"/file-lxattr", "user.key", b"value", 0) + cephfs.symlink(b"/file-lxattr", b"/file-sym-lxattr") + assert_equal(b"value", cephfs.getxattr(b"/file-sym-lxattr", "user.key")) + assert_raises(libcephfs.NoData, cephfs.lgetxattr, b"/file-sym-lxattr", "user.key") + + cephfs.lsetxattr(b"/file-sym-lxattr", "trusted.key-sym", b"value-sym", 0) + assert_equal(b"value-sym", 
cephfs.lgetxattr(b"/file-sym-lxattr", "trusted.key-sym")) + cephfs.lsetxattr(b"/file-sym-lxattr", "trusted.big", b"x" * 300, 0) + + # Default size is 255, get ERANGE + assert_raises(libcephfs.OutOfRange, cephfs.lgetxattr, b"/file-sym-lxattr", "trusted.big") + + # Pass explicit size, and we'll get the value + assert_equal(300, len(cephfs.lgetxattr(b"/file-sym-lxattr", "trusted.big", 300))) + + cephfs.lremovexattr(b"/file-sym-lxattr", "trusted.key-sym") + # trusted.key-sym is already removed + assert_raises(libcephfs.NoData, cephfs.lgetxattr, b"/file-sym-lxattr", "trusted.key-sym") + + # trusted.big is only listed + ret_val, ret_buff = cephfs.llistxattr(b"/file-sym-lxattr") + assert_equal(12, ret_val) + assert_equal("trusted.big\x00", ret_buff.decode('utf-8')) + cephfs.unlink(b'/file-lxattr') + cephfs.unlink(b'/file-sym-lxattr') + +@with_setup(setup_test) +def test_mount_root(): + cephfs.mkdir(b"/mount-directory", 0o755) + cephfs.unmount() + cephfs.mount(mount_root = b"/mount-directory") + + assert_raises(libcephfs.Error, cephfs.mount, mount_root = b"/nowhere") + cephfs.unmount() + cephfs.mount() + +@with_setup(setup_test) +def test_utime(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.utime(b'/file-1') + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) + mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) + + cephfs.utime(b'/file-1', (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + 
cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futime(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futime(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = int(time.mktime(stx_pre['atime'].timetuple())) + mtime_pre = int(time.mktime(stx_pre['mtime'].timetuple())) + + cephfs.futime(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_utimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.utimes(b'/file-1') + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.utimes(b'/file-1', (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_lutimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + cephfs.symlink(b'/file-1', b'/file-2') + 
+ stx_pre_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + stx_pre_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + time.sleep(1) + cephfs.lutimes(b'/file-2') + + stx_post_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + assert_equal(stx_post_t['atime'], stx_pre_t['atime']) + assert_equal(stx_post_t['mtime'], stx_pre_t['mtime']) + + assert_greater(stx_post_s['atime'], stx_pre_s['atime']) + assert_greater(stx_post_s['mtime'], stx_pre_s['mtime']) + + atime_pre = time.mktime(stx_pre_s['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre_s['mtime'].timetuple()) + + cephfs.lutimes(b'/file-2', (atime_pre, mtime_pre)) + stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, libcephfs.AT_SYMLINK_NOFOLLOW) + + assert_equal(stx_post_s['atime'], stx_pre_s['atime']) + assert_equal(stx_post_s['mtime'], stx_pre_s['mtime']) + + cephfs.unlink(b'/file-2') + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futimes(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futimes(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.futimes(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + 
assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_futimens(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + + stx_pre = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + time.sleep(1) + cephfs.futimens(fd) + + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_greater(stx_post['atime'], stx_pre['atime']) + assert_greater(stx_post['mtime'], stx_pre['mtime']) + + atime_pre = time.mktime(stx_pre['atime'].timetuple()) + mtime_pre = time.mktime(stx_pre['mtime'].timetuple()) + + cephfs.futimens(fd, (atime_pre, mtime_pre)) + stx_post = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_MTIME, 0) + + assert_equal(stx_post['atime'], stx_pre['atime']) + assert_equal(stx_post['mtime'], stx_pre['mtime']) + + cephfs.close(fd) + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_lchmod(): + fd = cephfs.open(b'/file-1', 'w', 0o755) + cephfs.write(fd, b'0000', 0) + cephfs.close(fd) + + cephfs.symlink(b'/file-1', b'/file-2') + + stx_pre_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_MODE, 0) + stx_pre_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_MODE, libcephfs.AT_SYMLINK_NOFOLLOW) + + time.sleep(1) + cephfs.lchmod(b'/file-2', 0o400) + + stx_post_t = cephfs.statx(b'/file-1', libcephfs.CEPH_STATX_MODE, 0) + stx_post_s = cephfs.statx(b'/file-2', libcephfs.CEPH_STATX_MODE, libcephfs.AT_SYMLINK_NOFOLLOW) + + assert_equal(stx_post_t['mode'], stx_pre_t['mode']) + assert_not_equal(stx_post_s['mode'], stx_pre_s['mode']) + stx_post_s_perm_bits = stx_post_s['mode'] & ~stat.S_IFMT(stx_post_s["mode"]) + assert_equal(stx_post_s_perm_bits, 0o400) + + cephfs.unlink(b'/file-2') + cephfs.unlink(b'/file-1') + +@with_setup(setup_test) +def test_fchmod(): + fd = cephfs.open(b'/file-fchmod', 'w', 0o655) + st = cephfs.statx(b'/file-fchmod', 
libcephfs.CEPH_STATX_MODE, 0) + mode = st["mode"] | stat.S_IXUSR + cephfs.fchmod(fd, mode) + st = cephfs.statx(b'/file-fchmod', libcephfs.CEPH_STATX_MODE, 0) + assert_equal(st["mode"] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + assert_raises(TypeError, cephfs.fchmod, "/file-fchmod", stat.S_IXUSR) + assert_raises(TypeError, cephfs.fchmod, fd, "stat.S_IXUSR") + cephfs.close(fd) + cephfs.unlink(b'/file-fchmod') + +@with_setup(setup_test) +def test_fchown(): + fd = cephfs.open(b'/file-fchown', 'w', 0o655) + uid = os.getuid() + gid = os.getgid() + assert_raises(TypeError, cephfs.fchown, b'/file-fchown', uid, gid) + assert_raises(TypeError, cephfs.fchown, fd, "uid", "gid") + cephfs.fchown(fd, uid, gid) + st = cephfs.statx(b'/file-fchown', libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID, 0) + assert_equal(st["uid"], uid) + assert_equal(st["gid"], gid) + cephfs.fchown(fd, 9999, 9999) + st = cephfs.statx(b'/file-fchown', libcephfs.CEPH_STATX_UID | libcephfs.CEPH_STATX_GID, 0) + assert_equal(st["uid"], 9999) + assert_equal(st["gid"], 9999) + cephfs.close(fd) + cephfs.unlink(b'/file-fchown') + +@with_setup(setup_test) +def test_truncate(): + fd = cephfs.open(b'/file-truncate', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + cephfs.truncate(b'/file-truncate', 0) + stat = cephfs.fsync(fd, 0) + st = cephfs.statx(b'/file-truncate', libcephfs.CEPH_STATX_SIZE, 0) + assert_equal(st["size"], 0) + cephfs.close(fd) + cephfs.unlink(b'/file-truncate') + +@with_setup(setup_test) +def test_ftruncate(): + fd = cephfs.open(b'/file-ftruncate', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + assert_raises(TypeError, cephfs.ftruncate, b'/file-ftruncate', 0) + cephfs.ftruncate(fd, 0) + stat = cephfs.fsync(fd, 0) + st = cephfs.fstat(fd) + assert_equal(st.st_size, 0) + cephfs.close(fd) + cephfs.unlink(b'/file-ftruncate') + +@with_setup(setup_test) +def test_fallocate(): + fd = cephfs.open(b'/file-fallocate', 'w', 0o755) + assert_raises(TypeError, cephfs.fallocate, 
b'/file-fallocate', 0, 10) + cephfs.fallocate(fd, 0, 10) + stat = cephfs.fsync(fd, 0) + st = cephfs.fstat(fd) + assert_equal(st.st_size, 10) + cephfs.close(fd) + cephfs.unlink(b'/file-fallocate') + +@with_setup(setup_test) +def test_mknod(): + mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR + cephfs.mknod(b'/file-fifo', mode) + st = cephfs.statx(b'/file-fifo', libcephfs.CEPH_STATX_MODE, 0) + assert_equal(st["mode"] & mode, mode) + cephfs.unlink(b'/file-fifo') + +@with_setup(setup_test) +def test_lazyio(): + fd = cephfs.open(b'/file-lazyio', 'w', 0o755) + assert_raises(TypeError, cephfs.lazyio, "fd", 1) + assert_raises(TypeError, cephfs.lazyio, fd, "1") + cephfs.lazyio(fd, 1) + cephfs.write(fd, b"1111", 0) + assert_raises(TypeError, cephfs.lazyio_propagate, "fd", 0, 4) + assert_raises(TypeError, cephfs.lazyio_propagate, fd, "0", 4) + assert_raises(TypeError, cephfs.lazyio_propagate, fd, 0, "4") + cephfs.lazyio_propagate(fd, 0, 4) + st = cephfs.fstat(fd) + assert_equal(st.st_size, 4) + cephfs.write(fd, b"2222", 4) + assert_raises(TypeError, cephfs.lazyio_synchronize, "fd", 0, 8) + assert_raises(TypeError, cephfs.lazyio_synchronize, fd, "0", 8) + assert_raises(TypeError, cephfs.lazyio_synchronize, fd, 0, "8") + cephfs.lazyio_synchronize(fd, 0, 8) + st = cephfs.fstat(fd) + assert_equal(st.st_size, 8) + cephfs.close(fd) + cephfs.unlink(b'/file-lazyio') + +@with_setup(setup_test) +def test_replication(): + fd = cephfs.open(b'/file-rep', 'w', 0o755) + assert_raises(TypeError, cephfs.get_file_replication, "fd") + l_dict = cephfs.get_layout(fd) + assert('pool_name' in l_dict.keys()) + cnt = cephfs.get_file_replication(fd) + get_rep_cnt_cmd = "ceph osd pool get " + l_dict["pool_name"] + " size" + s=os.popen(get_rep_cnt_cmd).read().strip('\n') + size=int(s.split(" ")[-1]) + assert_equal(cnt, size) + cnt = cephfs.get_path_replication(b'/file-rep') + assert_equal(cnt, size) + cephfs.close(fd) + cephfs.unlink(b'/file-rep') + +@with_setup(setup_test) +def test_caps(): + fd = 
cephfs.open(b'/file-caps', 'w', 0o755) + timeout = cephfs.get_cap_return_timeout() + assert_equal(timeout, 300) + fd_caps = cephfs.debug_get_fd_caps(fd) + file_caps = cephfs.debug_get_file_caps(b'/file-caps') + assert_equal(fd_caps, file_caps) + cephfs.close(fd) + cephfs.unlink(b'/file-caps') + +@with_setup(setup_test) +def test_setuuid(): + ses_id_uid = uuid.uuid1() + ses_id_str = str(ses_id_uid) + cephfs.set_uuid(ses_id_str) + +@with_setup(setup_test) +def test_session_timeout(): + assert_raises(TypeError, cephfs.set_session_timeout, "300") + cephfs.set_session_timeout(300) + +@with_setup(setup_test) +def test_readdirops(): + cephfs.chdir(b"/") + dirs = [b"dir-1", b"dir-2", b"dir-3"] + for i in dirs: + cephfs.mkdir(i, 0o755) + handler = cephfs.opendir(b"/") + d1 = cephfs.readdir(handler) + d2 = cephfs.readdir(handler) + d3 = cephfs.readdir(handler) + offset_d4 = cephfs.telldir(handler) + d4 = cephfs.readdir(handler) + cephfs.rewinddir(handler) + d = cephfs.readdir(handler) + assert_equal(d.d_name, d1.d_name) + cephfs.seekdir(handler, offset_d4) + d = cephfs.readdir(handler) + assert_equal(d.d_name, d4.d_name) + dirs += [b".", b".."] + cephfs.rewinddir(handler) + d = cephfs.readdir(handler) + while d: + assert(d.d_name in dirs) + dirs.remove(d.d_name) + d = cephfs.readdir(handler) + assert(len(dirs) == 0) + dirs = [b"/dir-1", b"/dir-2", b"/dir-3"] + for i in dirs: + cephfs.rmdir(i) + cephfs.closedir(handler) + +def test_preadv_pwritev(): + fd = cephfs.open(b'file-1', 'w', 0o755) + cephfs.pwritev(fd, [b"asdf", b"zxcvb"], 0) + cephfs.close(fd) + fd = cephfs.open(b'file-1', 'r', 0o755) + buf = [bytearray(i) for i in [4, 5]] + cephfs.preadv(fd, buf, 0) + assert_equal([b"asdf", b"zxcvb"], list(buf)) + cephfs.close(fd) + cephfs.unlink(b'file-1') + +@with_setup(setup_test) +def test_setattrx(): + fd = cephfs.open(b'file-setattrx', 'w', 0o655) + cephfs.write(fd, b"1111", 0) + cephfs.close(fd) + st = cephfs.statx(b'file-setattrx', libcephfs.CEPH_STATX_MODE, 0) + mode = 
st["mode"] | stat.S_IXUSR + assert_raises(TypeError, cephfs.setattrx, b'file-setattrx', "dict", 0, 0) + + time.sleep(1) + statx_dict = dict() + statx_dict["mode"] = mode + statx_dict["uid"] = 9999 + statx_dict["gid"] = 9999 + dt = datetime.now() + statx_dict["mtime"] = dt + statx_dict["atime"] = dt + statx_dict["ctime"] = dt + statx_dict["size"] = 10 + statx_dict["btime"] = dt + cephfs.setattrx(b'file-setattrx', statx_dict, libcephfs.CEPH_SETATTR_MODE | libcephfs.CEPH_SETATTR_UID | + libcephfs.CEPH_SETATTR_GID | libcephfs.CEPH_SETATTR_MTIME | + libcephfs.CEPH_SETATTR_ATIME | libcephfs.CEPH_SETATTR_CTIME | + libcephfs.CEPH_SETATTR_SIZE | libcephfs.CEPH_SETATTR_BTIME, 0) + st1 = cephfs.statx(b'file-setattrx', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_UID | + libcephfs.CEPH_STATX_GID | libcephfs.CEPH_STATX_MTIME | + libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_CTIME | + libcephfs.CEPH_STATX_SIZE | libcephfs.CEPH_STATX_BTIME, 0) + assert_equal(mode, st1["mode"]) + assert_equal(9999, st1["uid"]) + assert_equal(9999, st1["gid"]) + assert_equal(int(dt.timestamp()), int(st1["mtime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["atime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["ctime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["btime"].timestamp())) + assert_equal(10, st1["size"]) + cephfs.unlink(b'file-setattrx') + +@with_setup(setup_test) +def test_fsetattrx(): + fd = cephfs.open(b'file-fsetattrx', 'w', 0o655) + cephfs.write(fd, b"1111", 0) + st = cephfs.statx(b'file-fsetattrx', libcephfs.CEPH_STATX_MODE, 0) + mode = st["mode"] | stat.S_IXUSR + assert_raises(TypeError, cephfs.fsetattrx, fd, "dict", 0, 0) + + time.sleep(1) + statx_dict = dict() + statx_dict["mode"] = mode + statx_dict["uid"] = 9999 + statx_dict["gid"] = 9999 + dt = datetime.now() + statx_dict["mtime"] = dt + statx_dict["atime"] = dt + statx_dict["ctime"] = dt + statx_dict["size"] = 10 + statx_dict["btime"] = dt + cephfs.fsetattrx(fd, statx_dict, 
libcephfs.CEPH_SETATTR_MODE | libcephfs.CEPH_SETATTR_UID | + libcephfs.CEPH_SETATTR_GID | libcephfs.CEPH_SETATTR_MTIME | + libcephfs.CEPH_SETATTR_ATIME | libcephfs.CEPH_SETATTR_CTIME | + libcephfs.CEPH_SETATTR_SIZE | libcephfs.CEPH_SETATTR_BTIME) + st1 = cephfs.statx(b'file-fsetattrx', libcephfs.CEPH_STATX_MODE | libcephfs.CEPH_STATX_UID | + libcephfs.CEPH_STATX_GID | libcephfs.CEPH_STATX_MTIME | + libcephfs.CEPH_STATX_ATIME | libcephfs.CEPH_STATX_CTIME | + libcephfs.CEPH_STATX_SIZE | libcephfs.CEPH_STATX_BTIME, 0) + assert_equal(mode, st1["mode"]) + assert_equal(9999, st1["uid"]) + assert_equal(9999, st1["gid"]) + assert_equal(int(dt.timestamp()), int(st1["mtime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["atime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["ctime"].timestamp())) + assert_equal(int(dt.timestamp()), int(st1["btime"].timestamp())) + assert_equal(10, st1["size"]) + cephfs.close(fd) + cephfs.unlink(b'file-fsetattrx') + +@with_setup(setup_test) +def test_get_layout(): + fd = cephfs.open(b'file-get-layout', 'w', 0o755) + cephfs.write(fd, b"1111", 0) + assert_raises(TypeError, cephfs.get_layout, "fd") + l_dict = cephfs.get_layout(fd) + assert('stripe_unit' in l_dict.keys()) + assert('stripe_count' in l_dict.keys()) + assert('object_size' in l_dict.keys()) + assert('pool_id' in l_dict.keys()) + assert('pool_name' in l_dict.keys()) + + cephfs.close(fd) + cephfs.unlink(b'file-get-layout') + +@with_setup(setup_test) +def test_get_default_pool(): + dp_dict = cephfs.get_default_pool() + assert('pool_id' in dp_dict.keys()) + assert('pool_name' in dp_dict.keys()) + +@with_setup(setup_test) +def test_get_pool(): + dp_dict = cephfs.get_default_pool() + assert('pool_id' in dp_dict.keys()) + assert('pool_name' in dp_dict.keys()) + assert_equal(cephfs.get_pool_id(dp_dict["pool_name"]), dp_dict["pool_id"]) + get_rep_cnt_cmd = "ceph osd pool get " + dp_dict["pool_name"] + " size" + s=os.popen(get_rep_cnt_cmd).read().strip('\n') + 
size=int(s.split(" ")[-1]) + assert_equal(cephfs.get_pool_replication(dp_dict["pool_id"]), size) + +@with_setup(setup_test) +def test_disk_quota_exceeeded_error(): + cephfs.mkdir("/dir-1", 0o755) + cephfs.setxattr("/dir-1", "ceph.quota.max_bytes", b"5", 0) + fd = cephfs.open(b'/dir-1/file-1', 'w', 0o755) + assert_raises(libcephfs.DiskQuotaExceeded, cephfs.write, fd, b"abcdeghiklmnopqrstuvwxyz", 0) + cephfs.close(fd) + cephfs.unlink(b"/dir-1/file-1") + +@with_setup(setup_test) +def test_empty_snapshot_info(): + cephfs.mkdir("/dir-1", 0o755) + + # snap without metadata + cephfs.mkdir("/dir-1/.snap/snap0", 0o755) + snap_info = cephfs.snap_info("/dir-1/.snap/snap0") + assert_equal(snap_info["metadata"], {}) + assert_greater(snap_info["id"], 0) + cephfs.rmdir("/dir-1/.snap/snap0") + + # remove directory + cephfs.rmdir("/dir-1") + +@with_setup(setup_test) +def test_snapshot_info(): + cephfs.mkdir("/dir-1", 0o755) + + # snap with custom metadata + md = {"foo": "bar", "zig": "zag", "abcdefg": "12345"} + cephfs.mksnap("/dir-1", "snap0", 0o755, metadata=md) + snap_info = cephfs.snap_info("/dir-1/.snap/snap0") + assert_equal(snap_info["metadata"]["foo"], md["foo"]) + assert_equal(snap_info["metadata"]["zig"], md["zig"]) + assert_equal(snap_info["metadata"]["abcdefg"], md["abcdefg"]) + assert_greater(snap_info["id"], 0) + cephfs.rmsnap("/dir-1", "snap0") + + # remove directory + cephfs.rmdir("/dir-1") + +@with_setup(setup_test) +def test_set_mount_timeout_post_mount(): + assert_raises(libcephfs.LibCephFSStateError, cephfs.set_mount_timeout, 5) + +@with_setup(setup_test) +def test_set_mount_timeout(): + cephfs.unmount() + cephfs.set_mount_timeout(5) + cephfs.mount() + +@with_setup(setup_test) +def test_set_mount_timeout_lt0(): + cephfs.unmount() + assert_raises(libcephfs.InvalidValue, cephfs.set_mount_timeout, -5) + cephfs.mount() diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py new file mode 100644 index 000000000..ff6fee3db --- /dev/null +++ 
b/src/test/pybind/test_rados.py @@ -0,0 +1,1538 @@ +from __future__ import print_function +from nose import SkipTest +from nose.plugins.attrib import attr +from nose.tools import eq_ as eq, ok_ as ok, assert_raises +from rados import (Rados, Error, RadosStateError, Object, ObjectExists, + ObjectNotFound, ObjectBusy, NotConnected, + LIBRADOS_ALL_NSPACES, WriteOpCtx, ReadOpCtx, LIBRADOS_CREATE_EXCLUSIVE, + LIBRADOS_CMPXATTR_OP_EQ, LIBRADOS_CMPXATTR_OP_GT, LIBRADOS_CMPXATTR_OP_LT, OSError, + LIBRADOS_SNAP_HEAD, LIBRADOS_OPERATION_BALANCE_READS, LIBRADOS_OPERATION_SKIPRWLOCKS, MonitorLog, MAX_ERRNO, NoData, ExtendMismatch) +from datetime import timedelta +import time +import threading +import json +import errno +import os +import re +import sys + +def test_rados_init_error(): + assert_raises(Error, Rados, conffile='', rados_id='admin', + name='client.admin') + assert_raises(Error, Rados, conffile='', name='invalid') + assert_raises(Error, Rados, conffile='', name='bad.invalid') + +def test_rados_init(): + with Rados(conffile='', rados_id='admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + with Rados(conffile='', name='client.admin'): + pass + +def test_ioctx_context_manager(): + with Rados(conffile='', rados_id='admin') as conn: + with conn.open_ioctx('rbd') as ioctx: + pass + +def test_parse_argv(): + args = ['osd', 'pool', 'delete', 'foobar', 'foobar', '--yes-i-really-really-mean-it'] + r = Rados() + eq(args, r.conf_parse_argv(args)) + +def test_parse_argv_empty_str(): + args = [''] + r = Rados() + eq(args, r.conf_parse_argv(args)) + +class TestRadosStateError(object): + def _requires_configuring(self, rados): + assert_raises(RadosStateError, rados.connect) + + def _requires_configuring_or_connected(self, rados): + assert_raises(RadosStateError, rados.conf_read_file) + assert_raises(RadosStateError, rados.conf_parse_argv, None) + assert_raises(RadosStateError, rados.conf_parse_env) + 
assert_raises(RadosStateError, rados.conf_get, 'opt') + assert_raises(RadosStateError, rados.conf_set, 'opt', 'val') + assert_raises(RadosStateError, rados.ping_monitor, '0') + + def _requires_connected(self, rados): + assert_raises(RadosStateError, rados.pool_exists, 'foo') + assert_raises(RadosStateError, rados.pool_lookup, 'foo') + assert_raises(RadosStateError, rados.pool_reverse_lookup, 0) + assert_raises(RadosStateError, rados.create_pool, 'foo') + assert_raises(RadosStateError, rados.get_pool_base_tier, 0) + assert_raises(RadosStateError, rados.delete_pool, 'foo') + assert_raises(RadosStateError, rados.list_pools) + assert_raises(RadosStateError, rados.get_fsid) + assert_raises(RadosStateError, rados.open_ioctx, 'foo') + assert_raises(RadosStateError, rados.mon_command, '', b'') + assert_raises(RadosStateError, rados.osd_command, 0, '', b'') + assert_raises(RadosStateError, rados.pg_command, '', '', b'') + assert_raises(RadosStateError, rados.wait_for_latest_osdmap) + assert_raises(RadosStateError, rados.blocklist_add, '127.0.0.1/123', 0) + + def test_configuring(self): + rados = Rados(conffile='') + eq('configuring', rados.state) + self._requires_connected(rados) + + def test_connected(self): + rados = Rados(conffile='') + with rados: + eq('connected', rados.state) + self._requires_configuring(rados) + + def test_shutdown(self): + rados = Rados(conffile='') + with rados: + pass + eq('shutdown', rados.state) + self._requires_configuring(rados) + self._requires_configuring_or_connected(rados) + self._requires_connected(rados) + + +class TestRados(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.conf_parse_env('FOO_DOES_NOT_EXIST_BLAHBLAH') + self.rados.conf_parse_env() + self.rados.connect() + + # Assume any pre-existing pools are the cluster's defaults + self.default_pools = self.rados.list_pools() + + def tearDown(self): + self.rados.shutdown() + + def test_ping_monitor(self): + assert_raises(ObjectNotFound, 
self.rados.ping_monitor, 'not_exists_monitor') + cmd = {'prefix': 'mon dump', 'format':'json'} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') + for mon in json.loads(buf.decode('utf8'))['mons']: + while True: + output = self.rados.ping_monitor(mon['name']) + if output is None: + continue + buf = json.loads(output) + if buf.get('health'): + break + + def test_annotations(self): + with assert_raises(TypeError): + self.rados.create_pool(0xf00) + + def test_create(self): + self.rados.create_pool('foo') + self.rados.delete_pool('foo') + + def test_create_utf8(self): + poolname = "\u9ec4" + self.rados.create_pool(poolname) + assert self.rados.pool_exists(u"\u9ec4") + self.rados.delete_pool(poolname) + + def test_pool_lookup_utf8(self): + poolname = '\u9ec4' + self.rados.create_pool(poolname) + try: + poolid = self.rados.pool_lookup(poolname) + eq(poolname, self.rados.pool_reverse_lookup(poolid)) + finally: + self.rados.delete_pool(poolname) + + def test_eexist(self): + self.rados.create_pool('foo') + assert_raises(ObjectExists, self.rados.create_pool, 'foo') + self.rados.delete_pool('foo') + + def list_non_default_pools(self): + pools = self.rados.list_pools() + for p in self.default_pools: + pools.remove(p) + return set(pools) + + def test_list_pools(self): + eq(set(), self.list_non_default_pools()) + self.rados.create_pool('foo') + eq(set(['foo']), self.list_non_default_pools()) + self.rados.create_pool('bar') + eq(set(['foo', 'bar']), self.list_non_default_pools()) + self.rados.create_pool('baz') + eq(set(['foo', 'bar', 'baz']), self.list_non_default_pools()) + self.rados.delete_pool('foo') + eq(set(['bar', 'baz']), self.list_non_default_pools()) + self.rados.delete_pool('baz') + eq(set(['bar']), self.list_non_default_pools()) + self.rados.delete_pool('bar') + eq(set(), self.list_non_default_pools()) + self.rados.create_pool('a' * 500) + eq(set(['a' * 500]), self.list_non_default_pools()) + self.rados.delete_pool('a' * 500) + + @attr('tier') + def 
test_get_pool_base_tier(self): + self.rados.create_pool('foo') + try: + self.rados.create_pool('foo-cache') + try: + pool_id = self.rados.pool_lookup('foo') + tier_pool_id = self.rados.pool_lookup('foo-cache') + + cmd = {"prefix":"osd tier add", "pool":"foo", "tierpool":"foo-cache", "force_nonempty":""} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + + try: + cmd = {"prefix":"osd tier cache-mode", "pool":"foo-cache", "tierpool":"foo-cache", "mode":"readonly", "yes_i_really_mean_it": True} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + + eq(self.rados.wait_for_latest_osdmap(), 0) + + eq(pool_id, self.rados.get_pool_base_tier(pool_id)) + eq(pool_id, self.rados.get_pool_base_tier(tier_pool_id)) + finally: + cmd = {"prefix":"osd tier remove", "pool":"foo", "tierpool":"foo-cache"} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + finally: + self.rados.delete_pool('foo-cache') + finally: + self.rados.delete_pool('foo') + + def test_get_fsid(self): + fsid = self.rados.get_fsid() + assert re.match('[0-9a-f\-]{36}', fsid, re.I) + + def test_blocklist_add(self): + self.rados.blocklist_add("1.2.3.4/123", 1) + + @attr('stats') + def test_get_cluster_stats(self): + stats = self.rados.get_cluster_stats() + assert stats['kb'] > 0 + assert stats['kb_avail'] > 0 + assert stats['kb_used'] > 0 + assert stats['num_objects'] >= 0 + + def test_monitor_log(self): + lock = threading.Condition() + def cb(arg, line, who, sec, nsec, seq, level, msg): + # NOTE(sileht): the old pyrados API was received the pointer as int + # instead of the value of arg + eq(arg, "arg") + with lock: + lock.notify() + return 0 + + # NOTE(sileht): force don't save the monitor into local var + # to ensure all references are correctly tracked into the lib + MonitorLog(self.rados, "debug", cb, "arg") + with lock: + lock.wait() + MonitorLog(self.rados, "debug", None, None) + eq(None, 
self.rados.monitor_callback) + +class TestIoctx(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + + def tearDown(self): + cmd = {"prefix":"osd unset", "key":"noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test_get_last_version(self): + version = self.ioctx.get_last_version() + assert version >= 0 + + def test_get_stats(self): + stats = self.ioctx.get_stats() + eq(stats, {'num_objects_unfound': 0, + 'num_objects_missing_on_primary': 0, + 'num_object_clones': 0, + 'num_objects': 0, + 'num_object_copies': 0, + 'num_bytes': 0, + 'num_rd_kb': 0, + 'num_wr_kb': 0, + 'num_kb': 0, + 'num_wr': 0, + 'num_objects_degraded': 0, + 'num_rd': 0}) + + def test_write(self): + self.ioctx.write('abc', b'abc') + eq(self.ioctx.read('abc'), b'abc') + + def test_write_full(self): + self.ioctx.write('abc', b'abc') + eq(self.ioctx.read('abc'), b'abc') + self.ioctx.write_full('abc', b'd') + eq(self.ioctx.read('abc'), b'd') + + def test_writesame(self): + self.ioctx.writesame('ob', b'rzx', 9) + eq(self.ioctx.read('ob'), b'rzxrzxrzx') + + def test_append(self): + self.ioctx.write('abc', b'a') + self.ioctx.append('abc', b'b') + self.ioctx.append('abc', b'c') + eq(self.ioctx.read('abc'), b'abc') + + def test_write_zeros(self): + self.ioctx.write('abc', b'a\0b\0c') + eq(self.ioctx.read('abc'), b'a\0b\0c') + + def test_trunc(self): + self.ioctx.write('abc', b'abc') + self.ioctx.trunc('abc', 2) + eq(self.ioctx.read('abc'), b'ab') + size = self.ioctx.stat('abc')[0] + eq(size, 2) + + def test_cmpext(self): + self.ioctx.write('test_object', b'abcdefghi') + eq(0, self.ioctx.cmpext('test_object', b'abcdefghi', 0)) + eq(-MAX_ERRNO - 4, self.ioctx.cmpext('test_object', b'abcdxxxxx', 0)) + + def test_list_objects_empty(self): + 
eq(list(self.ioctx.list_objects()), []) + + def test_list_objects(self): + self.ioctx.write('a', b'') + self.ioctx.write('b', b'foo') + self.ioctx.write_full('c', b'bar') + self.ioctx.append('d', b'jazz') + object_names = [obj.key for obj in self.ioctx.list_objects()] + eq(sorted(object_names), ['a', 'b', 'c', 'd']) + + def test_list_ns_objects(self): + self.ioctx.write('a', b'') + self.ioctx.write('b', b'foo') + self.ioctx.write_full('c', b'bar') + self.ioctx.append('d', b'jazz') + self.ioctx.set_namespace("ns1") + self.ioctx.write('ns1-a', b'') + self.ioctx.write('ns1-b', b'foo') + self.ioctx.write_full('ns1-c', b'bar') + self.ioctx.append('ns1-d', b'jazz') + self.ioctx.append('d', b'jazz') + self.ioctx.set_namespace(LIBRADOS_ALL_NSPACES) + object_names = [(obj.nspace, obj.key) for obj in self.ioctx.list_objects()] + eq(sorted(object_names), [('', 'a'), ('','b'), ('','c'), ('','d'),\ + ('ns1', 'd'), ('ns1', 'ns1-a'), ('ns1', 'ns1-b'),\ + ('ns1', 'ns1-c'), ('ns1', 'ns1-d')]) + + def test_xattrs(self): + xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f=b'') + self.ioctx.write('abc', b'') + for key, value in xattrs.items(): + self.ioctx.set_xattr('abc', key, value) + eq(self.ioctx.get_xattr('abc', key), value) + stored_xattrs = {} + for key, value in self.ioctx.get_xattrs('abc'): + stored_xattrs[key] = value + eq(stored_xattrs, xattrs) + + def test_obj_xattrs(self): + xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0', f=b'') + self.ioctx.write('abc', b'') + obj = list(self.ioctx.list_objects())[0] + for key, value in xattrs.items(): + obj.set_xattr(key, value) + eq(obj.get_xattr(key), value) + stored_xattrs = {} + for key, value in obj.get_xattrs(): + stored_xattrs[key] = value + eq(stored_xattrs, xattrs) + + def test_get_pool_id(self): + eq(self.ioctx.get_pool_id(), self.rados.pool_lookup('test_pool')) + + def test_get_pool_name(self): + eq(self.ioctx.get_pool_name(), 'test_pool') + + @attr('snap') + def test_create_snap(self): + 
assert_raises(ObjectNotFound, self.ioctx.remove_snap, 'foo') + self.ioctx.create_snap('foo') + self.ioctx.remove_snap('foo') + + @attr('snap') + def test_list_snaps_empty(self): + eq(list(self.ioctx.list_snaps()), []) + + @attr('snap') + def test_list_snaps(self): + snaps = ['snap1', 'snap2', 'snap3'] + for snap in snaps: + self.ioctx.create_snap(snap) + listed_snaps = [snap.name for snap in self.ioctx.list_snaps()] + eq(snaps, listed_snaps) + + @attr('snap') + def test_lookup_snap(self): + self.ioctx.create_snap('foo') + snap = self.ioctx.lookup_snap('foo') + eq(snap.name, 'foo') + + @attr('snap') + def test_snap_timestamp(self): + self.ioctx.create_snap('foo') + snap = self.ioctx.lookup_snap('foo') + snap.get_timestamp() + + @attr('snap') + def test_remove_snap(self): + self.ioctx.create_snap('foo') + (snap,) = self.ioctx.list_snaps() + eq(snap.name, 'foo') + self.ioctx.remove_snap('foo') + eq(list(self.ioctx.list_snaps()), []) + + @attr('snap') + def test_snap_rollback(self): + self.ioctx.write("insnap", b"contents1") + self.ioctx.create_snap("snap1") + self.ioctx.remove_object("insnap") + self.ioctx.snap_rollback("insnap", "snap1") + eq(self.ioctx.read("insnap"), b"contents1") + self.ioctx.remove_snap("snap1") + self.ioctx.remove_object("insnap") + + @attr('snap') + def test_snap_read(self): + self.ioctx.write("insnap", b"contents1") + self.ioctx.create_snap("snap1") + self.ioctx.remove_object("insnap") + snap = self.ioctx.lookup_snap("snap1") + self.ioctx.set_read(snap.snap_id) + eq(self.ioctx.read("insnap"), b"contents1") + self.ioctx.set_read(LIBRADOS_SNAP_HEAD) + self.ioctx.write("inhead", b"contents2") + eq(self.ioctx.read("inhead"), b"contents2") + self.ioctx.remove_snap("snap1") + self.ioctx.remove_object("inhead") + + def test_set_omap(self): + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + 
write_op.set_flags(LIBRADOS_OPERATION_SKIPRWLOCKS) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 4) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + next(iter) + eq(list(iter), [("2", b"bbb"), ("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "2", "", 4) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(("3", b"ccc"), next(iter)) + eq(list(iter), [("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "2", 4) + eq(ret, 0) + read_op.set_flags(LIBRADOS_OPERATION_BALANCE_READS) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("2", b"bbb")]) + + def test_set_omap_aio(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + comp = self.ioctx.operate_aio_write_op(write_op, "hw", cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals(read_op, "", "", 4) + eq(ret, 0) + comp = self.ioctx.operate_aio_read_op(read_op, "hw", cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 4: + lock.wait() + eq(comp.get_return_value(), 0) + next(iter) + eq(list(iter), [("2", b"bbb"), ("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + + def test_write_ops(self): + with WriteOpCtx() as write_op: + write_op.new(0) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'') + + write_op.write_full(b'1') + write_op.append(b'2') + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12') + + 
write_op.write_full(b'12345') + write_op.write(b'x', 2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12x45') + + write_op.write_full(b'12345') + write_op.zero(2, 2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12\x00\x005') + + write_op.write_full(b'12345') + write_op.truncate(2) + self.ioctx.operate_write_op(write_op, "write_ops") + eq(self.ioctx.read('write_ops'), b'12') + + write_op.remove() + self.ioctx.operate_write_op(write_op, "write_ops") + with assert_raises(ObjectNotFound): + self.ioctx.read('write_ops') + + def test_execute_op(self): + with WriteOpCtx() as write_op: + write_op.execute("hello", "record_hello", b"ebs") + self.ioctx.operate_write_op(write_op, "object") + eq(self.ioctx.read('object'), b"Hello, ebs!") + + def test_writesame_op(self): + with WriteOpCtx() as write_op: + write_op.writesame(b'rzx', 9) + self.ioctx.operate_write_op(write_op, 'abc') + eq(self.ioctx.read('abc'), b'rzxrzxrzx') + + def test_get_omap_vals_by_keys(self): + keys = ("1", "2", "3", "4") + values = (b"aaa", b"bbb", b"ccc", b"\x04\x04\x04\x04") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("3", b"ccc"), ("4", b"\x04\x04\x04\x04")]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("3","4",)) + eq(ret, 0) + with assert_raises(ObjectNotFound): + self.ioctx.operate_read_op(read_op, "no_such") + + def test_get_omap_keys(self): + keys = ("1", "2", "3") + values = (b"aaa", b"bbb", b"ccc") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_keys(read_op,"",2) + eq(ret, 0) + 
self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), [("1", None), ("2", None)]) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_keys(read_op,"",2) + eq(ret, 0) + with assert_raises(ObjectNotFound): + self.ioctx.operate_read_op(read_op, "no_such") + + def test_clear_omap(self): + keys = ("1", "2", "3") + values = (b"aaa", b"bbb", b"ccc") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "hw") + with WriteOpCtx() as write_op_1: + self.ioctx.clear_omap(write_op_1) + self.ioctx.operate_write_op(write_op_1, "hw") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op,("1",)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "hw") + eq(list(iter), []) + + def test_remove_omap_ramge2(self): + keys = ("1", "2", "3", "4") + values = (b"a", b"bb", b"ccc", b"dddd") + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, keys, values) + self.ioctx.operate_write_op(write_op, "test_obj") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, keys) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "test_obj") + eq(list(iter), list(zip(keys, values))) + with WriteOpCtx() as write_op: + self.ioctx.remove_omap_range2(write_op, "1", "4") + self.ioctx.operate_write_op(write_op, "test_obj") + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, keys) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, "test_obj") + eq(list(iter), [("4", b"dddd")]) + + def test_omap_cmp(self): + object_id = 'test' + self.ioctx.write(object_id, b'omap_cmp') + with WriteOpCtx() as write_op: + self.ioctx.set_omap(write_op, ('key1',), ('1',)) + self.ioctx.operate_write_op(write_op, object_id) + with WriteOpCtx() as write_op: + write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_EQ) + self.ioctx.set_omap(write_op, ('key1',), ('2',)) + self.ioctx.operate_write_op(write_op, object_id) + with ReadOpCtx() as read_op: 
+ iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, object_id) + eq(list(iter), [('key1', b'2')]) + with WriteOpCtx() as write_op: + write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_GT) + self.ioctx.set_omap(write_op, ('key1',), ('3',)) + self.ioctx.operate_write_op(write_op, object_id) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, object_id) + eq(list(iter), [('key1', b'3')]) + with WriteOpCtx() as write_op: + write_op.omap_cmp('key1', '4', LIBRADOS_CMPXATTR_OP_LT) + self.ioctx.set_omap(write_op, ('key1',), ('4',)) + self.ioctx.operate_write_op(write_op, object_id) + with ReadOpCtx() as read_op: + iter, ret = self.ioctx.get_omap_vals_by_keys(read_op, ('key1',)) + eq(ret, 0) + self.ioctx.operate_read_op(read_op, object_id) + eq(list(iter), [('key1', b'4')]) + with WriteOpCtx() as write_op: + write_op.omap_cmp('key1', '1', LIBRADOS_CMPXATTR_OP_EQ) + self.ioctx.set_omap(write_op, ('key1',), ('5',)) + try: + self.ioctx.operate_write_op(write_op, object_id) + except (OSError, ExtendMismatch) as e: + eq(e.errno, 125) + else: + message = "omap_cmp did not raise Exception when omap content does not match" + raise AssertionError(message) + + def test_cmpext_op(self): + object_id = 'test' + with WriteOpCtx() as write_op: + write_op.write(b'12345', 0) + self.ioctx.operate_write_op(write_op, object_id) + with WriteOpCtx() as write_op: + write_op.cmpext(b'12345', 0) + write_op.write(b'54321', 0) + self.ioctx.operate_write_op(write_op, object_id) + eq(self.ioctx.read(object_id), b'54321') + with WriteOpCtx() as write_op: + write_op.cmpext(b'56789', 0) + write_op.write(b'12345', 0) + try: + self.ioctx.operate_write_op(write_op, object_id) + except ExtendMismatch as e: + # the cmpext_result compare with expected error number, it should be (-MAX_ERRNO - 1) + # where "1" is the offset of the first unmatched byte 
+ eq(-e.errno, -MAX_ERRNO - 1) + eq(e.offset, 1) + else: + message = "cmpext did not raise Exception when object content does not match" + raise AssertionError(message) + with ReadOpCtx() as read_op: + read_op.cmpext(b'54321', 0) + self.ioctx.operate_read_op(read_op, object_id) + with ReadOpCtx() as read_op: + read_op.cmpext(b'54789', 0) + try: + self.ioctx.operate_read_op(read_op, object_id) + except ExtendMismatch as e: + # the cmpext_result compare with expected error number, it should be (-MAX_ERRNO - 2) + # where "2" is the offset of the first unmatched byte + eq(-e.errno, -MAX_ERRNO - 2) + eq(e.offset, 2) + else: + message = "cmpext did not raise Exception when object content does not match" + raise AssertionError(message) + + def test_xattrs_op(self): + xattrs = dict(a=b'1', b=b'2', c=b'3', d=b'a\0b', e=b'\0') + with WriteOpCtx() as write_op: + write_op.new(LIBRADOS_CREATE_EXCLUSIVE) + for key, value in xattrs.items(): + write_op.set_xattr(key, value) + self.ioctx.operate_write_op(write_op, 'abc') + eq(self.ioctx.get_xattr('abc', key), value) + + stored_xattrs_1 = {} + for key, value in self.ioctx.get_xattrs('abc'): + stored_xattrs_1[key] = value + eq(stored_xattrs_1, xattrs) + + for key in xattrs.keys(): + write_op.rm_xattr(key) + self.ioctx.operate_write_op(write_op, 'abc') + stored_xattrs_2 = {} + for key, value in self.ioctx.get_xattrs('abc'): + stored_xattrs_2[key] = value + eq(stored_xattrs_2, {}) + + write_op.remove() + self.ioctx.operate_write_op(write_op, 'abc') + + def test_locator(self): + self.ioctx.set_locator_key("bar") + self.ioctx.write('foo', b'contents1') + objects = [i for i in self.ioctx.list_objects()] + eq(len(objects), 1) + eq(self.ioctx.get_locator_key(), "bar") + self.ioctx.set_locator_key("") + objects[0].seek(0) + objects[0].write(b"contents2") + eq(self.ioctx.get_locator_key(), "") + self.ioctx.set_locator_key("bar") + contents = self.ioctx.read("foo") + eq(contents, b"contents2") + eq(self.ioctx.get_locator_key(), "bar") + 
objects[0].remove() + objects = [i for i in self.ioctx.list_objects()] + eq(objects, []) + self.ioctx.set_locator_key("") + + def test_operate_aio_write_op(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + with WriteOpCtx() as write_op: + write_op.write(b'rzx') + comp = self.ioctx.operate_aio_write_op(write_op, "object", cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + eq(self.ioctx.read('object'), b'rzx') + + def test_aio_write(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_cmpext(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + + self.ioctx.write('test_object', b'abcdefghi') + comp = self.ioctx.aio_cmpext('test_object', b'abcdefghi', 0, cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + eq(comp.get_return_value(), 0) + + def test_aio_rmxattr(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + self.ioctx.set_xattr("xyz", "key", b'value') + eq(self.ioctx.get_xattr("xyz", "key"), b'value') + comp = self.ioctx.aio_rmxattr("xyz", "key", cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + eq(comp.get_return_value(), 0) + with assert_raises(NoData): + self.ioctx.get_xattr("xyz", "key") + + def test_aio_write_no_comp_ref(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + # 
NOTE(sileht): force don't save the comp into local var + # to ensure all references are correctly tracked into the lib + self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + with lock: + while count[0] < 2: + lock.wait() + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_append(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_write("foo", b"bar", 0, cb, cb) + comp2 = self.ioctx.aio_append("foo", b"baz", cb, cb) + comp.wait_for_complete() + contents = self.ioctx.read("foo") + eq(contents, b"barbaz") + with lock: + while count[0] < 4: + lock.wait() + eq(comp.get_return_value(), 0) + eq(comp2.get_return_value(), 0) + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_write_full(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + self.ioctx.aio_write("foo", b"barbaz", 0, cb, cb) + comp = self.ioctx.aio_write_full("foo", b"bar", cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + contents = self.ioctx.read("foo") + eq(contents, b"bar") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_writesame(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_writesame("abc", b"rzx", 9, 0, cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + eq(comp.get_return_value(), 0) + eq(self.ioctx.read("abc"), b"rzxrzxrzx") + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_stat(self): + lock = threading.Condition() + count = [0] + def cb(_, size, mtime): + with lock: + count[0] += 1 + lock.notify() + + comp = self.ioctx.aio_stat("foo", cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + 
eq(comp.get_return_value(), -2) + + self.ioctx.write("foo", b"bar") + + comp = self.ioctx.aio_stat("foo", cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_remove(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + self.ioctx.write('foo', b'wrx') + eq(self.ioctx.read('foo'), b'wrx') + comp = self.ioctx.aio_remove('foo', cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 0) + eq(list(self.ioctx.list_objects()), []) + + def _take_down_acting_set(self, pool, objectname): + # find acting_set for pool:objectname and take it down; used to + # verify that async reads don't complete while acting set is missing + cmd = { + "prefix":"osd map", + "pool":pool, + "object":objectname, + "format":"json", + } + r, jsonout, _ = self.rados.mon_command(json.dumps(cmd), b'') + objmap = json.loads(jsonout.decode("utf-8")) + acting_set = objmap['acting'] + cmd = {"prefix":"osd set", "key":"noup"} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + cmd = {"prefix":"osd down", "ids":[str(i) for i in acting_set]} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + + # wait for OSDs to acknowledge the down + eq(self.rados.wait_for_latest_osdmap(), 0) + + def _let_osds_back_up(self): + cmd = {"prefix":"osd unset", "key":"noup"} + r, _, _ = self.rados.mon_command(json.dumps(cmd), b'') + eq(r, 0) + + def test_aio_read_wait_for_complete(self): + # use wait_for_complete() and wait for cb by + # watching retval[0] + + # this is a list so that the local cb() can modify it + payload = b"bar\000frob" + self.ioctx.write("foo", payload) + self._take_down_acting_set('test_pool', 'foo') + + retval = [None] + lock = threading.Condition() + def cb(_, buf): + with lock: + retval[0] = buf + lock.notify() + + 
comp = self.ioctx.aio_read("foo", len(payload), 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(None, retval[0]) + + self._let_osds_back_up() + comp.wait_for_complete() + loops = 0 + with lock: + while retval[0] is None and loops <= 10: + lock.wait(timeout=5) + loops += 1 + assert(loops <= 10) + + eq(retval[0], payload) + eq(sys.getrefcount(comp), 2) + + def test_aio_read_wait_for_complete_and_cb(self): + # use wait_for_complete_and_cb(), verify retval[0] is + # set by the time we regain control + payload = b"bar\000frob" + self.ioctx.write("foo", payload) + + self._take_down_acting_set('test_pool', 'foo') + # this is a list so that the local cb() can modify it + retval = [None] + lock = threading.Condition() + def cb(_, buf): + with lock: + retval[0] = buf + lock.notify() + comp = self.ioctx.aio_read("foo", len(payload), 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(None, retval[0]) + + self._let_osds_back_up() + comp.wait_for_complete_and_cb() + assert(retval[0] is not None) + eq(retval[0], payload) + eq(sys.getrefcount(comp), 2) + + def test_aio_read_wait_for_complete_and_cb_error(self): + # error case, use wait_for_complete_and_cb(), verify retval[0] is + # set by the time we regain control + self._take_down_acting_set('test_pool', 'bar') + + # this is a list so that the local cb() can modify it + retval = [1] + lock = threading.Condition() + def cb(_, buf): + with lock: + retval[0] = buf + lock.notify() + + # read from a DNE object + comp = self.ioctx.aio_read("bar", 3, 0, cb) + eq(False, comp.is_complete()) + time.sleep(3) + eq(False, comp.is_complete()) + with lock: + eq(1, retval[0]) + self._let_osds_back_up() + + comp.wait_for_complete_and_cb() + eq(None, retval[0]) + assert(comp.get_return_value() < 0) + eq(sys.getrefcount(comp), 2) + + def test_lock(self): + self.ioctx.lock_exclusive("foo", "lock", "locker", "desc_lock", + 10000, 0) + 
assert_raises(ObjectExists, + self.ioctx.lock_exclusive, + "foo", "lock", "locker", "desc_lock", 10000, 0) + self.ioctx.unlock("foo", "lock", "locker") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker") + + self.ioctx.lock_shared("foo", "lock", "locker1", "tag", "desc_lock", + 10000, 0) + self.ioctx.lock_shared("foo", "lock", "locker2", "tag", "desc_lock", + 10000, 0) + assert_raises(ObjectBusy, + self.ioctx.lock_exclusive, + "foo", "lock", "locker3", "desc_lock", 10000, 0) + self.ioctx.unlock("foo", "lock", "locker1") + self.ioctx.unlock("foo", "lock", "locker2") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker1") + assert_raises(ObjectNotFound, self.ioctx.unlock, "foo", "lock", "locker2") + + def test_execute(self): + self.ioctx.write("foo", b"") # ensure object exists + + ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"") + eq(buf, b"Hello, world!") + + ret, buf = self.ioctx.execute("foo", "hello", "say_hello", b"nose") + eq(buf, b"Hello, nose!") + + def test_aio_execute(self): + count = [0] + retval = [None] + lock = threading.Condition() + def cb(_, buf): + with lock: + if retval[0] is None: + retval[0] = buf + count[0] += 1 + lock.notify() + self.ioctx.write("foo", b"") # ensure object exists + + comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"", 32, cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 2: + lock.wait() + eq(comp.get_return_value(), 13) + eq(retval[0], b"Hello, world!") + + retval[0] = None + comp = self.ioctx.aio_execute("foo", "hello", "say_hello", b"nose", 32, cb, cb) + comp.wait_for_complete() + with lock: + while count[0] < 4: + lock.wait() + eq(comp.get_return_value(), 12) + eq(retval[0], b"Hello, nose!") + + [i.remove() for i in self.ioctx.list_objects()] + + def test_aio_setxattr(self): + lock = threading.Condition() + count = [0] + def cb(blah): + with lock: + count[0] += 1 + lock.notify() + return 0 + comp = self.ioctx.aio_setxattr("obj", "key", 
b'value', cb) + comp.wait_for_complete() + with lock: + while count[0] < 1: + lock.wait() + eq(comp.get_return_value(), 0) + eq(self.ioctx.get_xattr("obj", "key"), b'value') + + def test_applications(self): + cmd = {"prefix":"osd dump", "format":"json"} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'') + eq(ret, 0) + assert len(buf) > 0 + release = json.loads(buf.decode("utf-8")).get("require_osd_release", + None) + if not release or release[0] < 'l': + raise SkipTest + + eq([], self.ioctx.application_list()) + + self.ioctx.application_enable("app1") + assert_raises(Error, self.ioctx.application_enable, "app2") + self.ioctx.application_enable("app2", True) + + assert_raises(Error, self.ioctx.application_metadata_list, "dne") + eq([], self.ioctx.application_metadata_list("app1")) + + assert_raises(Error, self.ioctx.application_metadata_set, "dne", "key", + "key") + self.ioctx.application_metadata_set("app1", "key1", "val1") + eq("val1", self.ioctx.application_metadata_get("app1", "key1")) + self.ioctx.application_metadata_set("app1", "key2", "val2") + eq("val2", self.ioctx.application_metadata_get("app1", "key2")) + self.ioctx.application_metadata_set("app2", "key1", "val1") + eq("val1", self.ioctx.application_metadata_get("app2", "key1")) + + eq([("key1", "val1"), ("key2", "val2")], + self.ioctx.application_metadata_list("app1")) + + self.ioctx.application_metadata_remove("app1", "key1") + eq([("key2", "val2")], self.ioctx.application_metadata_list("app1")) + + def test_service_daemon(self): + name = "pid-" + str(os.getpid()) + metadata = {'version': '3.14', 'memory': '42'} + self.rados.service_daemon_register("laundry", name, metadata) + status = {'result': 'unknown', 'test': 'running'} + self.rados.service_daemon_update(status) + + def test_alignment(self): + eq(self.ioctx.alignment(), None) + + +@attr('ec') +class TestIoctxEc(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.pool = 'test-ec' + 
self.profile = 'testprofile-%s' % self.pool + cmd = {"prefix": "osd erasure-code-profile set", + "name": self.profile, "profile": ["k=2", "m=1", "crush-failure-domain=osd"]} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0, msg=out) + # create ec pool with profile created above + cmd = {'prefix': 'osd pool create', 'pg_num': 8, 'pgp_num': 8, + 'pool': self.pool, 'pool_type': 'erasure', + 'erasure_code_profile': self.profile} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0, msg=out) + assert self.rados.pool_exists(self.pool) + self.ioctx = self.rados.open_ioctx(self.pool) + + def tearDown(self): + cmd = {"prefix": "osd unset", "key": "noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool(self.pool) + self.rados.shutdown() + + def test_alignment(self): + eq(self.ioctx.alignment(), 8192) + + +class TestIoctx2(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + pool_id = self.rados.pool_lookup('test_pool') + assert pool_id > 0 + self.ioctx2 = self.rados.open_ioctx2(pool_id) + + def tearDown(self): + cmd = {"prefix": "osd unset", "key": "noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx2.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test_get_last_version(self): + version = self.ioctx2.get_last_version() + assert version >= 0 + + def test_get_stats(self): + stats = self.ioctx2.get_stats() + eq(stats, {'num_objects_unfound': 0, + 'num_objects_missing_on_primary': 0, + 'num_object_clones': 0, + 'num_objects': 0, + 'num_object_copies': 0, + 'num_bytes': 0, + 'num_rd_kb': 0, + 'num_wr_kb': 0, + 'num_kb': 0, + 'num_wr': 0, + 'num_objects_degraded': 0, + 'num_rd': 0}) + + +class TestObject(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + 
self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + self.ioctx.write('foo', b'bar') + self.object = Object(self.ioctx, 'foo') + + def tearDown(self): + self.ioctx.close() + self.ioctx = None + self.rados.delete_pool('test_pool') + self.rados.shutdown() + self.rados = None + + def test_read(self): + eq(self.object.read(3), b'bar') + eq(self.object.read(100), b'') + + def test_seek(self): + self.object.write(b'blah') + self.object.seek(0) + eq(self.object.read(4), b'blah') + self.object.seek(1) + eq(self.object.read(3), b'lah') + + def test_write(self): + self.object.write(b'barbaz') + self.object.seek(0) + eq(self.object.read(3), b'bar') + eq(self.object.read(3), b'baz') + +@attr('snap') +class TestIoCtxSelfManagedSnaps(object): + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + + def tearDown(self): + cmd = {"prefix":"osd unset", "key":"noup"} + self.rados.mon_command(json.dumps(cmd), b'') + self.ioctx.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def test(self): + # cannot mix-and-match pool and self-managed snapshot mode + self.ioctx.set_self_managed_snap_write([]) + self.ioctx.write('abc', b'abc') + snap_id_1 = self.ioctx.create_self_managed_snap() + self.ioctx.set_self_managed_snap_write([snap_id_1]) + + self.ioctx.write('abc', b'def') + snap_id_2 = self.ioctx.create_self_managed_snap() + self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2]) + + self.ioctx.write('abc', b'ghi') + + self.ioctx.rollback_self_managed_snap('abc', snap_id_1) + eq(self.ioctx.read('abc'), b'abc') + + self.ioctx.rollback_self_managed_snap('abc', snap_id_2) + eq(self.ioctx.read('abc'), b'def') + + self.ioctx.remove_self_managed_snap(snap_id_1) + self.ioctx.remove_self_managed_snap(snap_id_2) + +class 
TestCommand(object): + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + + def tearDown(self): + self.rados.shutdown() + + def test_monmap_dump(self): + + # check for success and some plain output with epoch in it + cmd = {"prefix":"mon dump"} + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + assert len(buf) > 0 + assert(b'epoch' in buf) + + # JSON, and grab current epoch + cmd['format'] = 'json' + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, 0) + assert len(buf) > 0 + d = json.loads(buf.decode("utf-8")) + assert('epoch' in d) + epoch = d['epoch'] + + # assume epoch + 1000 does not exist; test for ENOENT + cmd['epoch'] = epoch + 1000 + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30) + eq(ret, -errno.ENOENT) + eq(len(buf), 0) + del cmd['epoch'] + + # send to specific target by name, rank + cmd = {"prefix": "version"} + + target = d['mons'][0]['name'] + print(target) + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, + target=target) + eq(ret, 0) + assert len(buf) > 0 + e = json.loads(buf.decode("utf-8")) + assert('release' in e) + + target = d['mons'][0]['rank'] + print(target) + ret, buf, errs = self.rados.mon_command(json.dumps(cmd), b'', timeout=30, + target=target) + eq(ret, 0) + assert len(buf) > 0 + e = json.loads(buf.decode("utf-8")) + assert('release' in e) + + @attr('bench') + def test_osd_bench(self): + cmd = dict(prefix='bench', size=4096, count=8192) + ret, buf, err = self.rados.osd_command(0, json.dumps(cmd), b'', + timeout=30) + eq(ret, 0) + assert len(buf) > 0 + out = json.loads(buf.decode('utf-8')) + eq(out['blocksize'], cmd['size']) + eq(out['bytes_written'], cmd['count']) + + def test_ceph_osd_pool_create_utf8(self): + poolname = "\u9ec5" + + cmd = {"prefix": "osd pool create", "pg_num": 16, "pool": poolname} + ret, buf, out = self.rados.mon_command(json.dumps(cmd), b'') + eq(ret, 0) + assert 
len(out) > 0 + eq(u"pool '\u9ec5' created", out) + + +@attr('watch') +class TestWatchNotify(object): + OID = "test_watch_notify" + + def setUp(self): + self.rados = Rados(conffile='') + self.rados.connect() + self.rados.create_pool('test_pool') + assert self.rados.pool_exists('test_pool') + self.ioctx = self.rados.open_ioctx('test_pool') + self.ioctx.write(self.OID, b'test watch notify') + self.lock = threading.Condition() + self.notify_cnt = {} + self.notify_data = {} + self.notify_error = {} + # aio related + self.ack_cnt = {} + self.ack_data = {} + self.instance_id = self.rados.get_instance_id() + + def tearDown(self): + self.ioctx.close() + self.rados.delete_pool('test_pool') + self.rados.shutdown() + + def make_callback(self): + def callback(notify_id, notifier_id, watch_id, data): + with self.lock: + if watch_id not in self.notify_cnt: + self.notify_cnt[watch_id] = 1 + elif self.notify_data[watch_id] != data: + self.notify_cnt[watch_id] += 1 + self.notify_data[watch_id] = data + return callback + + def make_error_callback(self): + def callback(watch_id, error): + with self.lock: + self.notify_error[watch_id] = error + return callback + + + def test(self): + with self.ioctx.watch(self.OID, self.make_callback(), + self.make_error_callback()) as watch1: + watch_id1 = watch1.get_id() + assert(watch_id1 > 0) + + with self.rados.open_ioctx('test_pool') as ioctx: + watch2 = ioctx.watch(self.OID, self.make_callback(), + self.make_error_callback()) + watch_id2 = watch2.get_id() + assert(watch_id2 > 0) + + assert(self.ioctx.notify(self.OID, 'test')) + with self.lock: + assert(watch_id1 in self.notify_cnt) + assert(watch_id2 in self.notify_cnt) + eq(self.notify_cnt[watch_id1], 1) + eq(self.notify_cnt[watch_id2], 1) + eq(self.notify_data[watch_id1], b'test') + eq(self.notify_data[watch_id2], b'test') + + assert(watch1.check() >= timedelta()) + assert(watch2.check() >= timedelta()) + + assert(self.ioctx.notify(self.OID, 'best')) + with self.lock: + 
eq(self.notify_cnt[watch_id1], 2) + eq(self.notify_cnt[watch_id2], 2) + eq(self.notify_data[watch_id1], b'best') + eq(self.notify_data[watch_id2], b'best') + + watch2.close() + + assert(self.ioctx.notify(self.OID, 'rest')) + with self.lock: + eq(self.notify_cnt[watch_id1], 3) + eq(self.notify_cnt[watch_id2], 2) + eq(self.notify_data[watch_id1], b'rest') + eq(self.notify_data[watch_id2], b'best') + + assert(watch1.check() >= timedelta()) + + self.ioctx.remove_object(self.OID) + + for i in range(10): + with self.lock: + if watch_id1 in self.notify_error: + break + time.sleep(1) + eq(self.notify_error[watch_id1], -errno.ENOTCONN) + assert_raises(NotConnected, watch1.check) + + assert_raises(ObjectNotFound, self.ioctx.notify, self.OID, 'test') + + def make_callback_reply(self): + def callback(notify_id, notifier_id, watch_id, data): + with self.lock: + return data + return callback + + def notify_callback(self, _, r, ack_list, timeout_list): + eq(r, 0) + with self.lock: + for notifier_id, _, notifier_data in ack_list: + if notifier_id not in self.ack_cnt: + self.ack_cnt[notifier_id] = 0 + self.ack_cnt[notifier_id] += 1 + self.ack_data[notifier_id] = notifier_data + + def notify_callback_err(self, _, r, ack_list, timeout_list): + eq(r, -errno.ENOENT) + + def test_aio_notify(self): + with self.ioctx.watch(self.OID, self.make_callback_reply(), + self.make_error_callback()) as watch1: + watch_id1 = watch1.get_id() + ok(watch_id1 > 0) + + with self.rados.open_ioctx('test_pool') as ioctx: + watch2 = ioctx.watch(self.OID, self.make_callback_reply(), + self.make_error_callback()) + watch_id2 = watch2.get_id() + ok(watch_id2 > 0) + + comp = self.ioctx.aio_notify(self.OID, self.notify_callback, msg='test') + comp.wait_for_complete_and_cb() + with self.lock: + ok(self.instance_id in self.ack_cnt) + eq(self.ack_cnt[self.instance_id], 2) + eq(self.ack_data[self.instance_id], b'test') + + ok(watch1.check() >= timedelta()) + ok(watch2.check() >= timedelta()) + + comp = 
self.ioctx.aio_notify(self.OID, self.notify_callback, msg='best') + comp.wait_for_complete_and_cb() + with self.lock: + eq(self.ack_cnt[self.instance_id], 4) + eq(self.ack_data[self.instance_id], b'best') + + watch2.close() + + comp = self.ioctx.aio_notify(self.OID, self.notify_callback, msg='rest') + comp.wait_for_complete_and_cb() + with self.lock: + eq(self.ack_cnt[self.instance_id], 5) + eq(self.ack_data[self.instance_id], b'rest') + + assert(watch1.check() >= timedelta()) + self.ioctx.remove_object(self.OID) + + for i in range(10): + with self.lock: + if watch_id1 in self.notify_error: + break + time.sleep(1) + eq(self.notify_error[watch_id1], -errno.ENOTCONN) + assert_raises(NotConnected, watch1.check) + + comp = self.ioctx.aio_notify(self.OID, self.notify_callback_err, msg='test') + comp.wait_for_complete_and_cb() diff --git a/src/test/pybind/test_rbd.py b/src/test/pybind/test_rbd.py new file mode 100644 index 000000000..75a193811 --- /dev/null +++ b/src/test/pybind/test_rbd.py @@ -0,0 +1,2771 @@ +# vim: expandtab smarttab shiftwidth=4 softtabstop=4 +import base64 +import copy +import errno +import functools +import json +import socket +import os +import platform +import time +import sys + +from datetime import datetime, timedelta +from nose import with_setup, SkipTest +from nose.tools import eq_ as eq, assert_raises, assert_not_equal +from rados import (Rados, + LIBRADOS_OP_FLAG_FADVISE_DONTNEED, + LIBRADOS_OP_FLAG_FADVISE_NOCACHE, + LIBRADOS_OP_FLAG_FADVISE_RANDOM) +from rbd import (RBD, Group, Image, ImageNotFound, InvalidArgument, ImageExists, + ImageBusy, ImageHasSnapshots, ReadOnlyImage, + FunctionNotSupported, ArgumentOutOfRange, + ECANCELED, OperationCanceled, + DiskQuotaExceeded, ConnectionShutdown, PermissionError, + RBD_FEATURE_LAYERING, RBD_FEATURE_STRIPINGV2, + RBD_FEATURE_EXCLUSIVE_LOCK, RBD_FEATURE_JOURNALING, + RBD_FEATURE_DEEP_FLATTEN, RBD_FEATURE_FAST_DIFF, + RBD_FEATURE_OBJECT_MAP, + RBD_MIRROR_MODE_DISABLED, RBD_MIRROR_MODE_IMAGE, + 
RBD_MIRROR_MODE_POOL, RBD_MIRROR_IMAGE_ENABLED, + RBD_MIRROR_IMAGE_DISABLED, MIRROR_IMAGE_STATUS_STATE_UNKNOWN, + RBD_MIRROR_IMAGE_MODE_JOURNAL, RBD_MIRROR_IMAGE_MODE_SNAPSHOT, + RBD_LOCK_MODE_EXCLUSIVE, RBD_OPERATION_FEATURE_GROUP, + RBD_SNAP_NAMESPACE_TYPE_TRASH, + RBD_SNAP_NAMESPACE_TYPE_MIRROR, + RBD_IMAGE_MIGRATION_STATE_PREPARED, RBD_CONFIG_SOURCE_CONFIG, + RBD_CONFIG_SOURCE_POOL, RBD_CONFIG_SOURCE_IMAGE, + RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, + RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, + RBD_MIRROR_PEER_DIRECTION_RX, RBD_MIRROR_PEER_DIRECTION_RX_TX, + RBD_SNAP_REMOVE_UNPROTECT, RBD_SNAP_MIRROR_STATE_PRIMARY, + RBD_SNAP_MIRROR_STATE_PRIMARY_DEMOTED, + RBD_SNAP_CREATE_SKIP_QUIESCE, + RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR, + RBD_WRITE_ZEROES_FLAG_THICK_PROVISION, + RBD_ENCRYPTION_FORMAT_LUKS1, RBD_ENCRYPTION_FORMAT_LUKS2) + +rados = None +ioctx = None +features = None +image_idx = 0 +group_idx = 0 +snap_idx = 0 +image_name = None +group_name = None +snap_name = None +pool_idx = 0 +pool_name = None +IMG_SIZE = 8 << 20 # 8 MiB +IMG_ORDER = 22 # 4 MiB objects + +os.environ["RBD_FORCE_ALLOW_V1"] = "1" + +def setup_module(): + global rados + rados = Rados(conffile='') + rados.connect() + global pool_name + pool_name = get_temp_pool_name() + rados.create_pool(pool_name) + global ioctx + ioctx = rados.open_ioctx(pool_name) + RBD().pool_init(ioctx, True) + global features + features = os.getenv("RBD_FEATURES") + features = int(features) if features is not None else 61 + +def teardown_module(): + global ioctx + ioctx.close() + global rados + rados.delete_pool(pool_name) + rados.shutdown() + +def get_temp_pool_name(): + global pool_idx + pool_idx += 1 + return "test-rbd-api-" + socket.gethostname() + '-' + str(os.getpid()) + \ + '-' + str(pool_idx) + +def get_temp_image_name(): + global image_idx + image_idx += 1 + return "image" + str(image_idx) + +def get_temp_group_name(): + global group_idx + group_idx += 1 + return "group" + str(group_idx) + +def get_temp_snap_name(): 
+ global snap_idx + snap_idx += 1 + return "snap" + str(snap_idx) + +def create_image(): + global image_name + image_name = get_temp_image_name() + if features is not None: + RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=False, + features=int(features)) + else: + RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=True) + return image_name + +def remove_image(): + if image_name is not None: + RBD().remove(ioctx, image_name) + +def create_group(): + global group_name + group_name = get_temp_group_name() + RBD().group_create(ioctx, group_name) + +def remove_group(): + if group_name is not None: + RBD().group_remove(ioctx, group_name) + +def rename_group(): + new_group_name = "new" + group_name + RBD().group_rename(ioctx, group_name, new_group_name) + +def require_new_format(): + def wrapper(fn): + def _require_new_format(*args, **kwargs): + global features + if features is None: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_require_new_format) + return wrapper + +def require_features(required_features): + def wrapper(fn): + def _require_features(*args, **kwargs): + global features + if features is None: + raise SkipTest + for feature in required_features: + if feature & features != feature: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_require_features) + return wrapper + +def require_linux(): + def wrapper(fn): + def _require_linux(*args, **kwargs): + if platform.system() != "Linux": + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_require_linux) + return wrapper + +def blocklist_features(blocklisted_features): + def wrapper(fn): + def _blocklist_features(*args, **kwargs): + global features + for feature in blocklisted_features: + if features is not None and feature & features == feature: + raise SkipTest + return fn(*args, **kwargs) + return functools.wraps(fn)(_blocklist_features) + return wrapper + +def test_version(): + RBD().version() + +def 
test_create(): + create_image() + remove_image() + +def check_default_params(format, order=None, features=None, stripe_count=None, + stripe_unit=None, exception=None): + global rados + global ioctx + orig_vals = {} + for k in ['rbd_default_format', 'rbd_default_order', 'rbd_default_features', + 'rbd_default_stripe_count', 'rbd_default_stripe_unit']: + orig_vals[k] = rados.conf_get(k) + try: + rados.conf_set('rbd_default_format', str(format)) + if order is not None: + rados.conf_set('rbd_default_order', str(order or 0)) + if features is not None: + rados.conf_set('rbd_default_features', str(features or 0)) + if stripe_count is not None: + rados.conf_set('rbd_default_stripe_count', str(stripe_count or 0)) + if stripe_unit is not None: + rados.conf_set('rbd_default_stripe_unit', str(stripe_unit or 0)) + feature_data_pool = 0 + datapool = rados.conf_get('rbd_default_data_pool') + if not len(datapool) == 0: + feature_data_pool = 128 + image_name = get_temp_image_name() + if exception is None: + RBD().create(ioctx, image_name, IMG_SIZE, old_format=(format == 1)) + try: + with Image(ioctx, image_name) as image: + eq(format == 1, image.old_format()) + + expected_order = int(rados.conf_get('rbd_default_order')) + actual_order = image.stat()['order'] + eq(expected_order, actual_order) + + expected_features = features + if format == 1: + expected_features = 0 + elif expected_features is None: + expected_features = 61 | feature_data_pool + else: + expected_features |= feature_data_pool + eq(expected_features, image.features()) + + expected_stripe_count = stripe_count + if not expected_stripe_count or format == 1 or \ + features & RBD_FEATURE_STRIPINGV2 == 0: + expected_stripe_count = 1 + eq(expected_stripe_count, image.stripe_count()) + + expected_stripe_unit = stripe_unit + if not expected_stripe_unit or format == 1 or \ + features & RBD_FEATURE_STRIPINGV2 == 0: + expected_stripe_unit = 1 << actual_order + eq(expected_stripe_unit, image.stripe_unit()) + finally: + 
RBD().remove(ioctx, image_name) + else: + assert_raises(exception, RBD().create, ioctx, image_name, IMG_SIZE) + finally: + for k, v in orig_vals.items(): + rados.conf_set(k, v) + +def test_create_defaults(): + # basic format 1 and 2 + check_default_params(1) + check_default_params(2) + # invalid order + check_default_params(1, 0, exception=ArgumentOutOfRange) + check_default_params(2, 0, exception=ArgumentOutOfRange) + check_default_params(1, 11, exception=ArgumentOutOfRange) + check_default_params(2, 11, exception=ArgumentOutOfRange) + check_default_params(1, 65, exception=ArgumentOutOfRange) + check_default_params(2, 65, exception=ArgumentOutOfRange) + # striping and features are ignored for format 1 + check_default_params(1, 20, 0, 1, 1) + check_default_params(1, 20, 3, 1, 1) + check_default_params(1, 20, 0, 0, 0) + # striping is ignored if stripingv2 is not set + check_default_params(2, 20, 0, 1, 1 << 20) + check_default_params(2, 20, RBD_FEATURE_LAYERING, 1, 1 << 20) + check_default_params(2, 20, 0, 0, 0) + # striping with stripingv2 is fine + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 1, 1 << 16) + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 20) + check_default_params(2, 20, RBD_FEATURE_STRIPINGV2, 10, 1 << 16) + check_default_params(2, 20, 0, 0, 0) + # make sure invalid combinations of stripe unit and order are still invalid + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 1 << 50, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 10, 100, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 0, 1, exception=InvalidArgument) + check_default_params(2, 22, RBD_FEATURE_STRIPINGV2, 1, 0, exception=InvalidArgument) + # 0 stripe unit and count are still ignored + check_default_params(2, 22, 0, 0, 0) + +def test_context_manager(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, 
image_name, IMG_SIZE) + with Image(ioctx, image_name) as image: + data = rand_data(256) + image.write(data, 0) + read = image.read(0, 256) + RBD().remove(ioctx, image_name) + eq(data, read) + +def test_open_read_only(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE) + data = rand_data(256) + with Image(ioctx, image_name) as image: + image.write(data, 0) + image.create_snap('snap') + with Image(ioctx, image_name, read_only=True) as image: + read = image.read(0, 256) + eq(data, read) + assert_raises(ReadOnlyImage, image.write, data, 0) + assert_raises(ReadOnlyImage, image.create_snap, 'test') + assert_raises(ReadOnlyImage, image.remove_snap, 'snap') + assert_raises(ReadOnlyImage, image.rollback_to_snap, 'snap') + assert_raises(ReadOnlyImage, image.protect_snap, 'snap') + assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') + assert_raises(ReadOnlyImage, image.unprotect_snap, 'snap') + assert_raises(ReadOnlyImage, image.flatten) + with Image(ioctx, image_name) as image: + image.remove_snap('snap') + RBD().remove(ioctx, image_name) + eq(data, read) + +def test_open_dne(): + for i in range(100): + image_name = get_temp_image_name() + assert_raises(ImageNotFound, Image, ioctx, image_name + 'dne') + assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap') + +def test_open_readonly_dne(): + for i in range(100): + image_name = get_temp_image_name() + assert_raises(ImageNotFound, Image, ioctx, image_name + 'dne', + read_only=True) + assert_raises(ImageNotFound, Image, ioctx, image_name, 'snap', + read_only=True) + +@require_new_format() +def test_open_by_id(): + with Rados(conffile='') as cluster: + with cluster.open_ioctx(pool_name) as ioctx: + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE) + with Image(ioctx, image_name) as image: + image_id = image.id() + with Image(ioctx, image_id=image_id) as image: + 
def test_aio_open():
    """aio_open_image()/aio_close() fire their callbacks and release refs."""
    with Rados(conffile='') as cluster:
        with cluster.open_ioctx(pool_name) as ioctx:
            image_name = get_temp_image_name()
            order = 20
            RBD().create(ioctx, image_name, IMG_SIZE, order)

            # single-element list lets the callback smuggle the handle out
            opened = [None]
            def open_cb(_, handle):
                opened[0] = handle

            comp = RBD().aio_open_image(open_cb, ioctx, image_name)
            comp.wait_for_complete_and_cb()
            eq(comp.get_return_value(), 0)
            # only 'comp' itself plus the getrefcount argument remain
            eq(sys.getrefcount(comp), 2)
            assert_not_equal(opened[0], None)

            image = opened[0]
            eq(image.get_name(), image_name)
            check_stat(image.stat(), IMG_SIZE, order)

            was_closed = [False]
            def close_cb(_):
                was_closed[0] = True

            comp = image.aio_close(close_cb)
            comp.wait_for_complete_and_cb()
            eq(comp.get_return_value(), 0)
            eq(sys.getrefcount(comp), 2)
            eq(was_closed[0], True)

            RBD().remove(ioctx, image_name)
def test_pool_metadata():
    """Pool-level metadata set/get/list/remove round-trips correctly."""
    rbd = RBD()
    eq(len(list(rbd.pool_metadata_list(ioctx))), 0)
    assert_raises(KeyError, rbd.pool_metadata_get, ioctx, "key1")

    rbd.pool_metadata_set(ioctx, "key1", "value1")
    rbd.pool_metadata_set(ioctx, "key2", "value2")
    eq(rbd.pool_metadata_get(ioctx, "key1"), "value1")
    eq(rbd.pool_metadata_get(ioctx, "key2"), "value2")
    eq(len(list(rbd.pool_metadata_list(ioctx))), 2)

    rbd.pool_metadata_remove(ioctx, "key1")
    remaining = list(rbd.pool_metadata_list(ioctx))
    eq(len(remaining), 1)
    eq(remaining[0], ("key2", "value2"))
    rbd.pool_metadata_remove(ioctx, "key2")
    # removing an absent key raises
    assert_raises(KeyError, rbd.pool_metadata_remove, ioctx, "key2")
    eq(len(list(rbd.pool_metadata_list(ioctx))), 0)

    # many oversized values exercise chunked listing
    N = 65
    for i in range(N):
        rbd.pool_metadata_set(ioctx, "key" + str(i), "X" * 1025)
    eq(len(list(rbd.pool_metadata_list(ioctx))), N)
    for i in range(N):
        rbd.pool_metadata_remove(ioctx, "key" + str(i))
        eq(len(list(rbd.pool_metadata_list(ioctx))), N - i - 1)
"rbd_request_timed_out_seconds") + eq(new_value, "100") + rbd.config_remove(ioctx, "rbd_request_timed_out_seconds") + + for option in rbd.config_list(ioctx): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + +def test_namespaces(): + rbd = RBD() + + eq(False, rbd.namespace_exists(ioctx, 'ns1')) + eq([], rbd.namespace_list(ioctx)) + assert_raises(ImageNotFound, rbd.namespace_remove, ioctx, 'ns1') + + rbd.namespace_create(ioctx, 'ns1') + eq(True, rbd.namespace_exists(ioctx, 'ns1')) + + assert_raises(ImageExists, rbd.namespace_create, ioctx, 'ns1') + eq(['ns1'], rbd.namespace_list(ioctx)) + rbd.namespace_remove(ioctx, 'ns1') + eq([], rbd.namespace_list(ioctx)) + +@require_new_format() +def test_pool_stats(): + rbd = RBD() + + try: + image1 = create_image() + image2 = create_image() + image3 = create_image() + image4 = create_image() + with Image(ioctx, image4) as image: + image.create_snap('snap') + image.resize(0) + + stats = rbd.pool_stats_get(ioctx) + eq(stats['image_count'], 4) + eq(stats['image_provisioned_bytes'], 3 * IMG_SIZE) + eq(stats['image_max_provisioned_bytes'], 4 * IMG_SIZE) + eq(stats['image_snap_count'], 1) + eq(stats['trash_count'], 0) + eq(stats['trash_provisioned_bytes'], 0) + eq(stats['trash_max_provisioned_bytes'], 0) + eq(stats['trash_snap_count'], 0) + finally: + rbd.remove(ioctx, image1) + rbd.remove(ioctx, image2) + rbd.remove(ioctx, image3) + with Image(ioctx, image4) as image: + image.remove_snap('snap') + rbd.remove(ioctx, image4) + +def rand_data(size): + return os.urandom(size) + +def check_stat(info, size, order): + assert 'block_name_prefix' in info + eq(info['size'], size) + eq(info['order'], order) + eq(info['num_objs'], size // (1 << order)) + eq(info['obj_size'], 1 << order) + +@require_new_format() +def test_features_to_string(): + rbd = RBD() + features = RBD_FEATURE_DEEP_FLATTEN | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_FAST_DIFF \ + | RBD_FEATURE_LAYERING | RBD_FEATURE_OBJECT_MAP + expected_features_string = 
"deep-flatten,exclusive-lock,fast-diff,layering,object-map" + features_string = rbd.features_to_string(features) + eq(expected_features_string, features_string) + + features = RBD_FEATURE_LAYERING + features_string = rbd.features_to_string(features) + eq(features_string, "layering") + + features = 16777216 + assert_raises(InvalidArgument, rbd.features_to_string, features) + +@require_new_format() +def test_features_from_string(): + rbd = RBD() + features_string = "deep-flatten,exclusive-lock,fast-diff,layering,object-map" + expected_features_bitmask = RBD_FEATURE_DEEP_FLATTEN | RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_FAST_DIFF \ + | RBD_FEATURE_LAYERING | RBD_FEATURE_OBJECT_MAP + features = rbd.features_from_string(features_string) + eq(expected_features_bitmask, features) + + features_string = "layering" + features = rbd.features_from_string(features_string) + eq(features, RBD_FEATURE_LAYERING) + +class TestImage(object): + + def setUp(self): + self.rbd = RBD() + create_image() + self.image = Image(ioctx, image_name) + + def tearDown(self): + self.image.close() + remove_image() + self.image = None + + @require_new_format() + @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def test_update_features(self): + features = self.image.features() + self.image.update_features(RBD_FEATURE_EXCLUSIVE_LOCK, True) + eq(features | RBD_FEATURE_EXCLUSIVE_LOCK, self.image.features()) + + @require_features([RBD_FEATURE_STRIPINGV2]) + def test_create_with_params(self): + global features + image_name = get_temp_image_name() + order = 20 + stripe_unit = 1 << 20 + stripe_count = 10 + self.rbd.create(ioctx, image_name, IMG_SIZE, order, + False, features, stripe_unit, stripe_count) + image = Image(ioctx, image_name) + info = image.stat() + check_stat(info, IMG_SIZE, order) + eq(image.features(), features) + eq(image.stripe_unit(), stripe_unit) + eq(image.stripe_count(), stripe_count) + image.close() + RBD().remove(ioctx, image_name) + + @require_new_format() + def test_id(self): + 
assert_not_equal(b'', self.image.id()) + + def test_block_name_prefix(self): + assert_not_equal(b'', self.image.block_name_prefix()) + + def test_create_timestamp(self): + timestamp = self.image.create_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_access_timestamp(self): + timestamp = self.image.access_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_modify_timestamp(self): + timestamp = self.image.modify_timestamp() + assert_not_equal(0, timestamp.year) + assert_not_equal(1970, timestamp.year) + + def test_invalidate_cache(self): + self.image.write(b'abc', 0) + eq(b'abc', self.image.read(0, 3)) + self.image.invalidate_cache() + eq(b'abc', self.image.read(0, 3)) + + def test_stat(self): + info = self.image.stat() + check_stat(info, IMG_SIZE, IMG_ORDER) + + def test_flags(self): + flags = self.image.flags() + eq(0, flags) + + def test_image_auto_close(self): + image = Image(ioctx, image_name) + + def test_use_after_close(self): + self.image.close() + assert_raises(InvalidArgument, self.image.stat) + + def test_write(self): + data = rand_data(256) + self.image.write(data, 0) + + def test_write_with_fadvise_flags(self): + data = rand_data(256) + self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) + self.image.write(data, 0, LIBRADOS_OP_FLAG_FADVISE_NOCACHE) + + def test_write_zeroes(self): + data = rand_data(256) + self.image.write(data, 0) + self.image.write_zeroes(0, 256) + eq(self.image.read(256, 256), b'\0' * 256) + check_diff(self.image, 0, IMG_SIZE, None, []) + + def test_write_zeroes_thick_provision(self): + data = rand_data(256) + self.image.write(data, 0) + self.image.write_zeroes(0, 256, RBD_WRITE_ZEROES_FLAG_THICK_PROVISION) + eq(self.image.read(256, 256), b'\0' * 256) + check_diff(self.image, 0, IMG_SIZE, None, [(0, 256, True)]) + + def test_read(self): + data = self.image.read(0, 20) + eq(data, b'\0' * 20) + + def 
test_read_with_fadvise_flags(self): + data = self.image.read(0, 20, LIBRADOS_OP_FLAG_FADVISE_DONTNEED) + eq(data, b'\0' * 20) + data = self.image.read(0, 20, LIBRADOS_OP_FLAG_FADVISE_RANDOM) + eq(data, b'\0' * 20) + + def test_large_write(self): + data = rand_data(IMG_SIZE) + self.image.write(data, 0) + + def test_large_read(self): + data = self.image.read(0, IMG_SIZE) + eq(data, b'\0' * IMG_SIZE) + + def test_write_read(self): + data = rand_data(256) + offset = 50 + self.image.write(data, offset) + read = self.image.read(offset, 256) + eq(data, read) + + def test_read_bad_offset(self): + assert_raises(InvalidArgument, self.image.read, IMG_SIZE + 1, IMG_SIZE) + + def test_resize(self): + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + info = self.image.stat() + check_stat(info, new_size, IMG_ORDER) + + def test_resize_allow_shrink_False(self): + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + info = self.image.stat() + check_stat(info, new_size, IMG_ORDER) + assert_raises(InvalidArgument, self.image.resize, IMG_SIZE, False) + + def test_size(self): + eq(IMG_SIZE, self.image.size()) + self.image.create_snap('snap1') + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + eq(new_size, self.image.size()) + self.image.create_snap('snap2') + self.image.set_snap('snap2') + eq(new_size, self.image.size()) + self.image.set_snap('snap1') + eq(IMG_SIZE, self.image.size()) + self.image.set_snap(None) + eq(new_size, self.image.size()) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + + def test_resize_down(self): + new_size = IMG_SIZE // 2 + data = rand_data(256) + self.image.write(data, IMG_SIZE // 2); + self.image.resize(new_size) + self.image.resize(IMG_SIZE) + read = self.image.read(IMG_SIZE // 2, 256) + eq(b'\0' * 256, read) + + def test_resize_bytes(self): + new_size = IMG_SIZE // 2 - 5 + data = rand_data(256) + self.image.write(data, IMG_SIZE // 2 - 10); + self.image.resize(new_size) + self.image.resize(IMG_SIZE) + read = 
self.image.read(IMG_SIZE // 2 - 10, 5) + eq(data[:5], read) + read = self.image.read(IMG_SIZE // 2 - 5, 251) + eq(b'\0' * 251, read) + + def _test_copy(self, features=None, order=None, stripe_unit=None, + stripe_count=None): + global ioctx + data = rand_data(256) + self.image.write(data, 256) + image_name = get_temp_image_name() + if features is None: + self.image.copy(ioctx, image_name) + elif order is None: + self.image.copy(ioctx, image_name, features) + elif stripe_unit is None: + self.image.copy(ioctx, image_name, features, order) + elif stripe_count is None: + self.image.copy(ioctx, image_name, features, order, stripe_unit) + else: + self.image.copy(ioctx, image_name, features, order, stripe_unit, + stripe_count) + assert_raises(ImageExists, self.image.copy, ioctx, image_name) + copy = Image(ioctx, image_name) + copy_data = copy.read(256, 256) + copy.close() + self.rbd.remove(ioctx, image_name) + eq(data, copy_data) + + def test_copy(self): + self._test_copy() + + def test_copy2(self): + self._test_copy(self.image.features(), self.image.stat()['order']) + + @require_features([RBD_FEATURE_STRIPINGV2]) + def test_copy3(self): + global features + self._test_copy(features, self.image.stat()['order'], + self.image.stripe_unit(), self.image.stripe_count()) + + def test_deep_copy(self): + global ioctx + global features + self.image.write(b'a' * 256, 0) + self.image.create_snap('snap1') + self.image.write(b'b' * 256, 0) + dst_name = get_temp_image_name() + self.image.deep_copy(ioctx, dst_name, features=features, + order=self.image.stat()['order'], + stripe_unit=self.image.stripe_unit(), + stripe_count=self.image.stripe_count(), + data_pool=None) + self.image.remove_snap('snap1') + with Image(ioctx, dst_name, 'snap1') as copy: + copy_data = copy.read(0, 256) + eq(b'a' * 256, copy_data) + with Image(ioctx, dst_name) as copy: + copy_data = copy.read(0, 256) + eq(b'b' * 256, copy_data) + copy.remove_snap('snap1') + self.rbd.remove(ioctx, dst_name) + + 
@require_features([RBD_FEATURE_LAYERING]) + def test_deep_copy_clone(self): + global ioctx + global features + self.image.write(b'a' * 256, 0) + self.image.create_snap('snap1') + self.image.write(b'b' * 256, 0) + self.image.protect_snap('snap1') + clone_name = get_temp_image_name() + dst_name = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name) + with Image(ioctx, clone_name) as child: + child.create_snap('snap1') + child.deep_copy(ioctx, dst_name, features=features, + order=self.image.stat()['order'], + stripe_unit=self.image.stripe_unit(), + stripe_count=self.image.stripe_count(), + data_pool=None) + child.remove_snap('snap1') + + with Image(ioctx, dst_name) as copy: + copy_data = copy.read(0, 256) + eq(b'a' * 256, copy_data) + copy.remove_snap('snap1') + self.rbd.remove(ioctx, dst_name) + self.rbd.remove(ioctx, clone_name) + self.image.unprotect_snap('snap1') + self.image.remove_snap('snap1') + + def test_create_snap(self): + global ioctx + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + at_snapshot = Image(ioctx, image_name, 'snap1') + snap_data = at_snapshot.read(0, 256) + at_snapshot.close() + eq(snap_data, b'\0' * 256) + self.image.remove_snap('snap1') + + def test_create_snap_exists(self): + self.image.create_snap('snap1') + assert_raises(ImageExists, self.image.create_snap, 'snap1') + self.image.remove_snap('snap1') + + def test_create_snap_flags(self): + self.image.create_snap('snap1', 0) + self.image.remove_snap('snap1') + self.image.create_snap('snap1', RBD_SNAP_CREATE_SKIP_QUIESCE) + self.image.remove_snap('snap1') + self.image.create_snap('snap1', RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) + self.image.remove_snap('snap1') + + def test_list_snaps(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + eq(['snap1'], [snap['name'] for snap in 
self.image.list_snaps()]) + self.image.create_snap('snap2') + eq(['snap1', 'snap2'], [snap['name'] for snap in self.image.list_snaps()]) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + + def test_list_snaps_iterator_auto_close(self): + self.image.create_snap('snap1') + self.image.list_snaps() + self.image.remove_snap('snap1') + + def test_remove_snap(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) + self.image.remove_snap('snap1') + eq([], list(self.image.list_snaps())) + + def test_remove_snap_not_found(self): + assert_raises(ImageNotFound, self.image.remove_snap, 'snap1') + + @require_features([RBD_FEATURE_LAYERING]) + def test_remove_snap2(self): + self.image.create_snap('snap1') + self.image.protect_snap('snap1') + assert(self.image.is_protected_snap('snap1')) + self.image.remove_snap2('snap1', RBD_SNAP_REMOVE_UNPROTECT) + eq([], list(self.image.list_snaps())) + + def test_remove_snap_by_id(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) + for snap in self.image.list_snaps(): + snap_id = snap["id"] + self.image.remove_snap_by_id(snap_id) + eq([], list(self.image.list_snaps())) + + def test_rename_snap(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) + self.image.rename_snap("snap1", "snap1-rename") + eq(['snap1-rename'], [snap['name'] for snap in self.image.list_snaps()]) + self.image.remove_snap('snap1-rename') + eq([], list(self.image.list_snaps())) + + @require_features([RBD_FEATURE_LAYERING]) + def test_protect_snap(self): + self.image.create_snap('snap1') + assert(not self.image.is_protected_snap('snap1')) + self.image.protect_snap('snap1') + assert(self.image.is_protected_snap('snap1')) + assert_raises(ImageBusy, self.image.remove_snap, 
'snap1') + self.image.unprotect_snap('snap1') + assert(not self.image.is_protected_snap('snap1')) + self.image.remove_snap('snap1') + assert_raises(ImageNotFound, self.image.unprotect_snap, 'snap1') + assert_raises(ImageNotFound, self.image.is_protected_snap, 'snap1') + + def test_snap_exists(self): + self.image.create_snap('snap1') + eq(self.image.snap_exists('snap1'), True) + self.image.remove_snap('snap1') + eq(self.image.snap_exists('snap1'), False) + + def test_snap_timestamp(self): + self.image.create_snap('snap1') + eq(['snap1'], [snap['name'] for snap in self.image.list_snaps()]) + for snap in self.image.list_snaps(): + snap_id = snap["id"] + time = self.image.get_snap_timestamp(snap_id) + assert_not_equal(b'', time.year) + assert_not_equal(0, time.year) + assert_not_equal(time.year, '1970') + self.image.remove_snap('snap1') + + def test_limit_snaps(self): + self.image.set_snap_limit(2) + eq(2, self.image.get_snap_limit()) + self.image.create_snap('snap1') + self.image.create_snap('snap2') + assert_raises(DiskQuotaExceeded, self.image.create_snap, 'snap3') + self.image.remove_snap_limit() + self.image.create_snap('snap3') + + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + self.image.remove_snap('snap3') + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def test_remove_with_exclusive_lock(self): + assert_raises(ImageBusy, remove_image) + + @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def test_remove_with_snap(self): + self.image.create_snap('snap1') + assert_raises(ImageHasSnapshots, remove_image) + self.image.remove_snap('snap1') + + @blocklist_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def test_remove_with_watcher(self): + data = rand_data(256) + self.image.write(data, 0) + assert_raises(ImageBusy, remove_image) + read = self.image.read(0, 256) + eq(read, data) + + def test_rollback_to_snap(self): + self.image.write(b'\0' * 256, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + 
data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.rollback_to_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + self.image.remove_snap('snap1') + + def test_rollback_to_snap_sparse(self): + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.rollback_to_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + self.image.remove_snap('snap1') + + def test_rollback_with_resize(self): + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, data) + new_size = IMG_SIZE * 2 + self.image.resize(new_size) + check_stat(self.image.stat(), new_size, IMG_ORDER) + self.image.write(data, new_size - 256) + self.image.create_snap('snap2') + read = self.image.read(new_size - 256, 256) + eq(read, data) + self.image.rollback_to_snap('snap1') + check_stat(self.image.stat(), IMG_SIZE, IMG_ORDER) + assert_raises(InvalidArgument, self.image.read, new_size - 256, 256) + self.image.rollback_to_snap('snap2') + check_stat(self.image.stat(), new_size, IMG_ORDER) + read = self.image.read(new_size - 256, 256) + eq(read, data) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + + def test_set_snap(self): + self.image.write(b'\0' * 256, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.set_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + assert_raises(ReadOnlyImage, self.image.write, data, 0) + self.image.remove_snap('snap1') + + def test_set_no_snap(self): + self.image.write(b'\0' * 256, 0) + 
self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.set_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + assert_raises(ReadOnlyImage, self.image.write, data, 0) + self.image.set_snap(None) + read = self.image.read(0, 256) + eq(read, data) + self.image.remove_snap('snap1') + + def test_set_snap_by_id(self): + self.image.write(b'\0' * 256, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + snaps = list(self.image.list_snaps()) + self.image.set_snap_by_id(snaps[0]['id']) + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + assert_raises(ReadOnlyImage, self.image.write, data, 0) + self.image.set_snap_by_id(None) + read = self.image.read(0, 256) + eq(read, data) + self.image.remove_snap('snap1') + + def test_snap_get_name(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + self.image.create_snap('snap2') + self.image.create_snap('snap3') + + for snap in self.image.list_snaps(): + expected_snap_name = self.image.snap_get_name(snap['id']) + eq(expected_snap_name, snap['name']) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + self.image.remove_snap('snap3') + eq([], list(self.image.list_snaps())) + + assert_raises(ImageNotFound, self.image.snap_get_name, 1) + + def test_snap_get_id(self): + eq([], list(self.image.list_snaps())) + self.image.create_snap('snap1') + self.image.create_snap('snap2') + self.image.create_snap('snap3') + + for snap in self.image.list_snaps(): + expected_snap_id = self.image.snap_get_id(snap['name']) + eq(expected_snap_id, snap['id']) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + self.image.remove_snap('snap3') + eq([], list(self.image.list_snaps())) 
+ + assert_raises(ImageNotFound, self.image.snap_get_id, 'snap1') + + def test_set_snap_sparse(self): + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.set_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + assert_raises(ReadOnlyImage, self.image.write, data, 0) + self.image.remove_snap('snap1') + + def test_many_snaps(self): + num_snaps = 200 + for i in range(num_snaps): + self.image.create_snap(str(i)) + snaps = sorted(self.image.list_snaps(), + key=lambda snap: int(snap['name'])) + eq(len(snaps), num_snaps) + for i, snap in enumerate(snaps): + eq(snap['size'], IMG_SIZE) + eq(snap['name'], str(i)) + for i in range(num_snaps): + self.image.remove_snap(str(i)) + + def test_set_snap_deleted(self): + self.image.write(b'\0' * 256, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.set_snap('snap1') + self.image.remove_snap('snap1') + assert_raises(ImageNotFound, self.image.read, 0, 256) + self.image.set_snap(None) + read = self.image.read(0, 256) + eq(read, data) + + def test_set_snap_recreated(self): + self.image.write(b'\0' * 256, 0) + self.image.create_snap('snap1') + read = self.image.read(0, 256) + eq(read, b'\0' * 256) + data = rand_data(256) + self.image.write(data, 0) + read = self.image.read(0, 256) + eq(read, data) + self.image.set_snap('snap1') + self.image.remove_snap('snap1') + self.image.create_snap('snap1') + assert_raises(ImageNotFound, self.image.read, 0, 256) + self.image.set_snap(None) + read = self.image.read(0, 256) + eq(read, data) + self.image.remove_snap('snap1') + + def test_lock_unlock(self): + assert_raises(ImageNotFound, self.image.unlock, '') + self.image.lock_exclusive('') + assert_raises(ImageExists, 
self.image.lock_exclusive, '') + assert_raises(ImageBusy, self.image.lock_exclusive, 'test') + assert_raises(ImageExists, self.image.lock_shared, '', '') + assert_raises(ImageBusy, self.image.lock_shared, 'foo', '') + self.image.unlock('') + + def test_list_lockers(self): + eq([], self.image.list_lockers()) + self.image.lock_exclusive('test') + lockers = self.image.list_lockers() + eq(1, len(lockers['lockers'])) + _, cookie, _ = lockers['lockers'][0] + eq(cookie, 'test') + eq('', lockers['tag']) + assert lockers['exclusive'] + self.image.unlock('test') + eq([], self.image.list_lockers()) + + num_shared = 10 + for i in range(num_shared): + self.image.lock_shared(str(i), 'tag') + lockers = self.image.list_lockers() + eq('tag', lockers['tag']) + assert not lockers['exclusive'] + eq(num_shared, len(lockers['lockers'])) + cookies = sorted(map(lambda x: x[1], lockers['lockers'])) + for i in range(num_shared): + eq(str(i), cookies[i]) + self.image.unlock(str(i)) + eq([], self.image.list_lockers()) + + def test_diff_iterate(self): + check_diff(self.image, 0, IMG_SIZE, None, []) + self.image.write(b'a' * 256, 0) + check_diff(self.image, 0, IMG_SIZE, None, [(0, 256, True)]) + self.image.write(b'b' * 256, 256) + check_diff(self.image, 0, IMG_SIZE, None, [(0, 512, True)]) + self.image.discard(128, 256) + check_diff(self.image, 0, IMG_SIZE, None, [(0, 512, True)]) + + self.image.create_snap('snap1') + self.image.discard(0, 1 << IMG_ORDER) + self.image.create_snap('snap2') + self.image.set_snap('snap2') + check_diff(self.image, 0, IMG_SIZE, 'snap1', [(0, 512, False)]) + self.image.remove_snap('snap1') + self.image.remove_snap('snap2') + + def test_aio_read(self): + # this is a list so that the local cb() can modify it + retval = [None] + def cb(_, buf): + retval[0] = buf + + # test1: success case + comp = self.image.aio_read(0, 20, cb) + comp.wait_for_complete_and_cb() + eq(retval[0], b'\0' * 20) + eq(comp.get_return_value(), 20) + eq(sys.getrefcount(comp), 2) + + # test2: error 
case + retval[0] = 1 + comp = self.image.aio_read(IMG_SIZE, 20, cb) + comp.wait_for_complete_and_cb() + eq(None, retval[0]) + assert(comp.get_return_value() < 0) + eq(sys.getrefcount(comp), 2) + + def test_aio_write(self): + retval = [None] + def cb(comp): + retval[0] = comp.get_return_value() + + data = rand_data(256) + comp = self.image.aio_write(data, 256, cb) + comp.wait_for_complete_and_cb() + eq(retval[0], 0) + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + eq(self.image.read(256, 256), data) + + def test_aio_discard(self): + retval = [None] + def cb(comp): + retval[0] = comp.get_return_value() + + data = rand_data(256) + self.image.write(data, 0) + comp = self.image.aio_discard(0, 256, cb) + comp.wait_for_complete_and_cb() + eq(retval[0], 0) + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + eq(self.image.read(256, 256), b'\0' * 256) + + def test_aio_write_zeroes(self): + retval = [None] + def cb(comp): + retval[0] = comp.get_return_value() + + data = rand_data(256) + self.image.write(data, 0) + comp = self.image.aio_write_zeroes(0, 256, cb) + comp.wait_for_complete_and_cb() + eq(retval[0], 0) + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + eq(self.image.read(256, 256), b'\0' * 256) + + def test_aio_flush(self): + retval = [None] + def cb(comp): + retval[0] = comp.get_return_value() + + comp = self.image.aio_flush(cb) + comp.wait_for_complete_and_cb() + eq(retval[0], 0) + eq(sys.getrefcount(comp), 2) + + def test_metadata(self): + metadata = list(self.image.metadata_list()) + eq(len(metadata), 0) + assert_raises(KeyError, self.image.metadata_get, "key1") + self.image.metadata_set("key1", "value1") + self.image.metadata_set("key2", "value2") + value = self.image.metadata_get("key1") + eq(value, "value1") + value = self.image.metadata_get("key2") + eq(value, "value2") + metadata = list(self.image.metadata_list()) + eq(len(metadata), 2) + self.image.metadata_remove("key1") + metadata = 
list(self.image.metadata_list()) + eq(len(metadata), 1) + eq(metadata[0], ("key2", "value2")) + self.image.metadata_remove("key2") + assert_raises(KeyError, self.image.metadata_remove, "key2") + metadata = list(self.image.metadata_list()) + eq(len(metadata), 0) + + N = 65 + for i in range(N): + self.image.metadata_set("key" + str(i), "X" * 1025) + metadata = list(self.image.metadata_list()) + eq(len(metadata), N) + for i in range(N): + self.image.metadata_remove("key" + str(i)) + metadata = list(self.image.metadata_list()) + eq(len(metadata), N - i - 1) + + def test_watchers_list(self): + watchers = list(self.image.watchers_list()) + # The image is open (in r/w mode) from setup, so expect there to be one + # watcher. + eq(len(watchers), 1) + + def test_config_list(self): + with Image(ioctx, image_name) as image: + for option in image.config_list(): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + image.metadata_set("conf_rbd_cache", "true") + + for option in image.config_list(): + if option['name'] == "rbd_cache": + eq(option['source'], RBD_CONFIG_SOURCE_IMAGE) + else: + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + image.metadata_remove("conf_rbd_cache") + + for option in image.config_list(): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + def test_image_config_set_and_get_and_remove(self): + with Image(ioctx, image_name) as image: + for option in image.config_list(): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + image.config_set("rbd_request_timed_out_seconds", "100") + modify_value = image.config_get("rbd_request_timed_out_seconds") + eq(modify_value, '100') + + image.config_remove("rbd_request_timed_out_seconds") + + for option in image.config_list(): + eq(option['source'], RBD_CONFIG_SOURCE_CONFIG) + + def test_sparsify(self): + assert_raises(InvalidArgument, self.image.sparsify, 16) + self.image.sparsify(4096) + + @require_linux() + @blocklist_features([RBD_FEATURE_JOURNALING]) + def test_encryption_luks1(self): + data = b'hello world' + 
def check_diff(image, offset, length, from_snapshot, expected):
    """Collect diff_iterate() extents over the given range and compare.

    Bug fix: the original body ignored its offset/length/from_snapshot
    parameters and always called diff_iterate(0, IMG_SIZE, None, cb), so
    callers passing a snapshot name (e.g. test_diff_iterate's 'snap1'
    check) were silently diffing against the image head instead.
    """
    extents = []
    def cb(off, size, exists):
        extents.append((off, size, exists))
    # honour the caller-supplied range and base snapshot
    image.diff_iterate(offset, length, from_snapshot, cb)
    eq(extents, expected)
@require_features([RBD_FEATURE_STRIPINGV2]) + def test_with_params3(self): + global features + self._test_with_params(features, self.image.stat()['order'], + self.image.stripe_unit(), + self.image.stripe_count()) + + def test_stripe_unit_and_count(self): + global features + global ioctx + image_name = get_temp_image_name() + RBD().create(ioctx, image_name, IMG_SIZE, IMG_ORDER, old_format=False, + features=int(features), stripe_unit=1048576, stripe_count=8) + image = Image(ioctx, image_name) + image.create_snap('snap1') + image.protect_snap('snap1') + clone_name = get_temp_image_name() + RBD().clone(ioctx, image_name, 'snap1', ioctx, clone_name) + clone = Image(ioctx, clone_name) + + eq(1048576, clone.stripe_unit()) + eq(8, clone.stripe_count()) + + clone.close() + RBD().remove(ioctx, clone_name) + image.unprotect_snap('snap1') + image.remove_snap('snap1') + image.close() + RBD().remove(ioctx, image_name) + + + def test_unprotected(self): + self.image.create_snap('snap2') + global features + clone_name2 = get_temp_image_name() + rados.conf_set("rbd_default_clone_format", "1") + assert_raises(InvalidArgument, self.rbd.clone, ioctx, image_name, + 'snap2', ioctx, clone_name2, features) + rados.conf_set("rbd_default_clone_format", "auto") + self.image.remove_snap('snap2') + + def test_unprotect_with_children(self): + global features + # can't remove a snapshot that has dependent clones + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + # validate parent info of clone created by TestClone.setUp + (pool, image, snap) = self.clone.parent_info() + eq(pool, pool_name) + eq(image, image_name) + eq(snap, 'snap1') + eq(self.image.id(), self.clone.parent_id()) + + # create a new pool... 
+ pool_name2 = get_temp_pool_name() + rados.create_pool(pool_name2) + other_ioctx = rados.open_ioctx(pool_name2) + other_ioctx.application_enable('rbd') + + # ...with a clone of the same parent + other_clone_name = get_temp_image_name() + rados.conf_set("rbd_default_clone_format", "1") + self.rbd.clone(ioctx, image_name, 'snap1', other_ioctx, + other_clone_name, features) + rados.conf_set("rbd_default_clone_format", "auto") + self.other_clone = Image(other_ioctx, other_clone_name) + # validate its parent info + (pool, image, snap) = self.other_clone.parent_info() + eq(pool, pool_name) + eq(image, image_name) + eq(snap, 'snap1') + eq(self.image.id(), self.other_clone.parent_id()) + + # can't unprotect snap with children + assert_raises(ImageBusy, self.image.unprotect_snap, 'snap1') + + # 2 children, check that cannot remove the parent snap + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + # close and remove other pool's clone + self.other_clone.close() + self.rbd.remove(other_ioctx, other_clone_name) + + # check that we cannot yet remove the parent snap + assert_raises(ImageBusy, self.image.remove_snap, 'snap1') + + other_ioctx.close() + rados.delete_pool(pool_name2) + + # unprotect, remove parent snap happen in cleanup, and should succeed + + def test_stat(self): + image_info = self.image.stat() + clone_info = self.clone.stat() + eq(clone_info['size'], image_info['size']) + eq(clone_info['size'], self.clone.overlap()) + + def test_resize_stat(self): + self.clone.resize(IMG_SIZE // 2) + image_info = self.image.stat() + clone_info = self.clone.stat() + eq(clone_info['size'], IMG_SIZE // 2) + eq(image_info['size'], IMG_SIZE) + eq(self.clone.overlap(), IMG_SIZE // 2) + + self.clone.resize(IMG_SIZE * 2) + image_info = self.image.stat() + clone_info = self.clone.stat() + eq(clone_info['size'], IMG_SIZE * 2) + eq(image_info['size'], IMG_SIZE) + eq(self.clone.overlap(), IMG_SIZE // 2) + + def test_resize_io(self): + parent_data = self.image.read(IMG_SIZE // 
2, 256) + self.image.resize(0) + self.clone.resize(IMG_SIZE // 2 + 128) + child_data = self.clone.read(IMG_SIZE // 2, 128) + eq(child_data, parent_data[:128]) + self.clone.resize(IMG_SIZE) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data[:128] + (b'\0' * 128)) + self.clone.resize(IMG_SIZE // 2 + 1) + child_data = self.clone.read(IMG_SIZE // 2, 1) + eq(child_data, parent_data[0:1]) + self.clone.resize(0) + self.clone.resize(IMG_SIZE) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, b'\0' * 256) + + def test_read(self): + parent_data = self.image.read(IMG_SIZE // 2, 256) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data) + + def test_write(self): + parent_data = self.image.read(IMG_SIZE // 2, 256) + new_data = rand_data(256) + self.clone.write(new_data, IMG_SIZE // 2 + 256) + child_data = self.clone.read(IMG_SIZE // 2 + 256, 256) + eq(child_data, new_data) + child_data = self.clone.read(IMG_SIZE // 2, 256) + eq(child_data, parent_data) + parent_data = self.image.read(IMG_SIZE // 2 + 256, 256) + eq(parent_data, b'\0' * 256) + + def check_children(self, expected): + actual = self.image.list_children() + # dedup for cache pools until + # http://tracker.ceph.com/issues/8187 is fixed + deduped = set([(pool_name, image[1]) for image in actual]) + eq(deduped, set(expected)) + + def check_children2(self, expected): + actual = [{k:v for k,v in x.items() if k in expected[0]} \ + for x in self.image.list_children2()] + eq(actual, expected) + + def check_descendants(self, expected): + eq(list(self.image.list_descendants()), expected) + + def get_image_id(self, ioctx, name): + with Image(ioctx, name) as image: + return image.id() + + def test_list_children(self): + global ioctx + global features + self.image.set_snap('snap1') + self.check_children([(pool_name, self.clone_name)]) + self.check_children2( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': 
self.get_image_id(ioctx, self.clone_name)}]) + self.check_descendants( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.clone.close() + self.rbd.remove(ioctx, self.clone_name) + eq(self.image.list_children(), []) + eq(list(self.image.list_children2()), []) + eq(list(self.image.list_descendants()), []) + + clone_name = get_temp_image_name() + '_' + expected_children = [] + expected_children2 = [] + for i in range(10): + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, + clone_name + str(i), features) + expected_children.append((pool_name, clone_name + str(i))) + expected_children2.append( + {'pool': pool_name, 'pool_namespace': '', + 'image': clone_name + str(i), 'trash': False, + 'id': self.get_image_id(ioctx, clone_name + str(i))}) + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + image6_id = self.get_image_id(ioctx, clone_name + str(5)) + RBD().trash_move(ioctx, clone_name + str(5), 0) + expected_children.remove((pool_name, clone_name + str(5))) + for item in expected_children2: + for k, v in item.items(): + if v == image6_id: + item["trash"] = True + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + RBD().trash_restore(ioctx, image6_id, clone_name + str(5)) + expected_children.append((pool_name, clone_name + str(5))) + for item in expected_children2: + for k, v in item.items(): + if v == image6_id: + item["trash"] = False + self.check_children(expected_children) + self.check_children2(expected_children2) + self.check_descendants(expected_children2) + + for i in range(10): + self.rbd.remove(ioctx, clone_name + str(i)) + expected_children.remove((pool_name, clone_name + str(i))) + expected_children2.pop(0) + self.check_children(expected_children) + self.check_children2(expected_children2) + 
self.check_descendants(expected_children2) + + eq(self.image.list_children(), []) + eq(list(self.image.list_children2()), []) + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, self.clone_name, + features) + self.check_children([(pool_name, self.clone_name)]) + self.check_children2( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.check_descendants( + [{'pool': pool_name, 'pool_namespace': '', + 'image': self.clone_name, 'trash': False, + 'id': self.get_image_id(ioctx, self.clone_name)}]) + self.clone = Image(ioctx, self.clone_name) + + def test_flatten_errors(self): + # test that we can't flatten a non-clone + assert_raises(InvalidArgument, self.image.flatten) + + # test that we can't flatten a snapshot + self.clone.create_snap('snap2') + self.clone.set_snap('snap2') + assert_raises(ReadOnlyImage, self.clone.flatten) + self.clone.remove_snap('snap2') + + def check_flatten_with_order(self, new_order, stripe_unit=None, + stripe_count=None): + global ioctx + global features + clone_name2 = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order, stripe_unit, stripe_count) + #with Image(ioctx, 'clone2') as clone: + clone2 = Image(ioctx, clone_name2) + clone2.flatten() + eq(clone2.overlap(), 0) + clone2.close() + self.rbd.remove(ioctx, clone_name2) + + # flatten after resizing to non-block size + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order, stripe_unit, stripe_count) + with Image(ioctx, clone_name2) as clone: + clone.resize(IMG_SIZE // 2 - 1) + clone.flatten() + eq(0, clone.overlap()) + self.rbd.remove(ioctx, clone_name2) + + # flatten after resizing to non-block size + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, new_order, stripe_unit, stripe_count) + with Image(ioctx, clone_name2) as clone: + clone.resize(IMG_SIZE // 2 + 1) + clone.flatten() + 
eq(clone.overlap(), 0) + self.rbd.remove(ioctx, clone_name2) + + def test_flatten_basic(self): + self.check_flatten_with_order(IMG_ORDER) + + def test_flatten_smaller_order(self): + self.check_flatten_with_order(IMG_ORDER - 2, 1048576, 1) + + def test_flatten_larger_order(self): + self.check_flatten_with_order(IMG_ORDER + 2) + + def test_flatten_drops_cache(self): + global ioctx + global features + clone_name2 = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name2, + features, IMG_ORDER) + with Image(ioctx, clone_name2) as clone: + with Image(ioctx, clone_name2) as clone2: + # cache object non-existence + data = clone.read(IMG_SIZE // 2, 256) + clone2_data = clone2.read(IMG_SIZE // 2, 256) + eq(data, clone2_data) + clone.flatten() + assert_raises(ImageNotFound, clone.parent_info) + assert_raises(ImageNotFound, clone2.parent_info) + assert_raises(ImageNotFound, clone.parent_id) + assert_raises(ImageNotFound, clone2.parent_id) + after_flatten = clone.read(IMG_SIZE // 2, 256) + eq(data, after_flatten) + after_flatten = clone2.read(IMG_SIZE // 2, 256) + eq(data, after_flatten) + self.rbd.remove(ioctx, clone_name2) + + def test_flatten_multi_level(self): + self.clone.create_snap('snap2') + self.clone.protect_snap('snap2') + clone_name3 = get_temp_image_name() + self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, + features) + self.clone.flatten() + with Image(ioctx, clone_name3) as clone3: + clone3.flatten() + self.clone.unprotect_snap('snap2') + self.clone.remove_snap('snap2') + self.rbd.remove(ioctx, clone_name3) + + def test_flatten_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + global ioctx + global features + clone_name = get_temp_image_name() + self.rbd.clone(ioctx, image_name, 'snap1', ioctx, clone_name, + features, 0) + with Image(ioctx, clone_name) as clone: + clone.flatten(on_progress=progress_cb) + self.rbd.remove(ioctx, 
clone_name) + eq(True, d['received_callback']) + + def test_resize_flatten_multi_level(self): + self.clone.create_snap('snap2') + self.clone.protect_snap('snap2') + clone_name3 = get_temp_image_name() + self.rbd.clone(ioctx, self.clone_name, 'snap2', ioctx, clone_name3, + features) + self.clone.resize(1) + orig_data = self.image.read(0, 256) + with Image(ioctx, clone_name3) as clone3: + clone3_data = clone3.read(0, 256) + eq(orig_data, clone3_data) + self.clone.flatten() + with Image(ioctx, clone_name3) as clone3: + clone3_data = clone3.read(0, 256) + eq(orig_data, clone3_data) + self.rbd.remove(ioctx, clone_name3) + self.clone.unprotect_snap('snap2') + self.clone.remove_snap('snap2') + + def test_trash_snapshot(self): + self.image.create_snap('snap2') + global features + clone_name = get_temp_image_name() + rados.conf_set("rbd_default_clone_format", "2") + self.rbd.clone(ioctx, image_name, 'snap2', ioctx, clone_name, features) + rados.conf_set("rbd_default_clone_format", "auto") + + self.image.remove_snap('snap2') + + snaps = [s for s in self.image.list_snaps() if s['name'] != 'snap1'] + eq([RBD_SNAP_NAMESPACE_TYPE_TRASH], [s['namespace'] for s in snaps]) + eq([{'original_name' : 'snap2'}], [s['trash'] for s in snaps]) + + self.rbd.remove(ioctx, clone_name) + eq([], [s for s in self.image.list_snaps() if s['name'] != 'snap1']) + +class TestExclusiveLock(object): + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK]) + def setUp(self): + global rados2 + rados2 = Rados(conffile='') + rados2.connect() + global ioctx2 + ioctx2 = rados2.open_ioctx(pool_name) + create_image() + + def tearDown(self): + remove_image() + global ioctx2 + ioctx2.close() + global rados2 + rados2.shutdown() + + def test_ownership(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image1.write(b'0'*256, 0) + eq(image1.is_exclusive_lock_owner(), True) + eq(image2.is_exclusive_lock_owner(), False) + + def test_snapshot_leadership(self): + with Image(ioctx, 
image_name) as image: + image.create_snap('snap') + eq(image.is_exclusive_lock_owner(), True) + try: + with Image(ioctx, image_name) as image: + image.write(b'0'*256, 0) + eq(image.is_exclusive_lock_owner(), True) + image.set_snap('snap') + eq(image.is_exclusive_lock_owner(), False) + with Image(ioctx, image_name, snapshot='snap') as image: + eq(image.is_exclusive_lock_owner(), False) + finally: + with Image(ioctx, image_name) as image: + image.remove_snap('snap') + + def test_read_only_leadership(self): + with Image(ioctx, image_name, read_only=True) as image: + eq(image.is_exclusive_lock_owner(), False) + + def test_follower_flatten(self): + with Image(ioctx, image_name) as image: + image.create_snap('snap') + image.protect_snap('snap') + try: + RBD().clone(ioctx, image_name, 'snap', ioctx, 'clone', features) + with Image(ioctx, 'clone') as image1, Image(ioctx2, 'clone') as image2: + data = rand_data(256) + image1.write(data, 0) + image2.flatten() + assert_raises(ImageNotFound, image1.parent_info) + assert_raises(ImageNotFound, image1.parent_id) + parent = True + for x in range(30): + try: + image2.parent_info() + except ImageNotFound: + parent = False + break + eq(False, parent) + finally: + RBD().remove(ioctx, 'clone') + with Image(ioctx, image_name) as image: + image.unprotect_snap('snap') + image.remove_snap('snap') + + def test_follower_resize(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image1.write(b'0'*256, 0) + for new_size in [IMG_SIZE * 2, IMG_SIZE // 2]: + image2.resize(new_size); + eq(new_size, image1.size()) + for x in range(30): + if new_size == image2.size(): + break + time.sleep(1) + eq(new_size, image2.size()) + + def test_follower_snap_create(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + image2.create_snap('snap1') + image1.remove_snap('snap1') + + def test_follower_snap_rollback(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as 
image2: + image1.create_snap('snap') + try: + assert_raises(ReadOnlyImage, image2.rollback_to_snap, 'snap') + image1.rollback_to_snap('snap') + finally: + image1.remove_snap('snap') + + def test_follower_discard(self): + global rados + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + data = rand_data(256) + image1.write(data, 0) + image2.discard(0, 256) + eq(image1.is_exclusive_lock_owner(), False) + eq(image2.is_exclusive_lock_owner(), True) + read = image2.read(0, 256) + if rados.conf_get('rbd_skip_partial_discard') == 'false': + eq(256 * b'\0', read) + else: + eq(data, read) + + def test_follower_write(self): + with Image(ioctx, image_name) as image1, Image(ioctx2, image_name) as image2: + data = rand_data(256) + image1.write(data, 0) + image2.write(data, IMG_SIZE // 2) + eq(image1.is_exclusive_lock_owner(), False) + eq(image2.is_exclusive_lock_owner(), True) + for offset in [0, IMG_SIZE // 2]: + read = image2.read(offset, 256) + eq(data, read) + def test_acquire_release_lock(self): + with Image(ioctx, image_name) as image: + image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + image.lock_release() + + def test_break_lock(self): + blocklist_rados = Rados(conffile='') + blocklist_rados.connect() + try: + blocklist_ioctx = blocklist_rados.open_ioctx(pool_name) + try: + rados2.conf_set('rbd_blocklist_on_break_lock', 'true') + with Image(ioctx2, image_name) as image, \ + Image(blocklist_ioctx, image_name) as blocklist_image: + + lock_owners = list(image.lock_get_owners()) + eq(0, len(lock_owners)) + + blocklist_image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + assert_raises(ReadOnlyImage, image.lock_acquire, + RBD_LOCK_MODE_EXCLUSIVE) + lock_owners = list(image.lock_get_owners()) + eq(1, len(lock_owners)) + eq(RBD_LOCK_MODE_EXCLUSIVE, lock_owners[0]['mode']) + image.lock_break(RBD_LOCK_MODE_EXCLUSIVE, + lock_owners[0]['owner']) + + assert_raises(ConnectionShutdown, + blocklist_image.is_exclusive_lock_owner) + + 
blocklist_rados.wait_for_latest_osdmap() + data = rand_data(256) + assert_raises(ConnectionShutdown, + blocklist_image.write, data, 0) + + image.lock_acquire(RBD_LOCK_MODE_EXCLUSIVE) + + try: + blocklist_image.close() + except ConnectionShutdown: + pass + finally: + blocklist_ioctx.close() + finally: + blocklist_rados.shutdown() + +class TestMirroring(object): + + @staticmethod + def check_info(info, global_id, state, primary=None): + eq(global_id, info['global_id']) + eq(state, info['state']) + if primary is not None: + eq(primary, info['primary']) + + def setUp(self): + self.rbd = RBD() + self.initial_mirror_mode = self.rbd.mirror_mode_get(ioctx) + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + create_image() + self.image = Image(ioctx, image_name) + + def tearDown(self): + self.image.close() + remove_image() + self.rbd.mirror_mode_set(ioctx, self.initial_mirror_mode) + + def test_uuid(self): + mirror_uuid = self.rbd.mirror_uuid_get(ioctx) + assert(mirror_uuid) + + def test_site_name(self): + site_name = "us-west-1" + self.rbd.mirror_site_name_set(rados, site_name) + eq(site_name, self.rbd.mirror_site_name_get(rados)) + self.rbd.mirror_site_name_set(rados, "") + eq(rados.get_fsid(), self.rbd.mirror_site_name_get(rados)) + + def test_mirror_peer_bootstrap(self): + eq([], list(self.rbd.mirror_peer_list(ioctx))) + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_DISABLED) + assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_create, + ioctx); + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + token_b64 = self.rbd.mirror_peer_bootstrap_create(ioctx) + token = base64.b64decode(token_b64) + token_dict = json.loads(token) + eq(sorted(['fsid', 'client_id', 'key', 'mon_host']), + sorted(list(token_dict.keys()))) + + # requires different cluster + assert_raises(InvalidArgument, self.rbd.mirror_peer_bootstrap_import, + ioctx, RBD_MIRROR_PEER_DIRECTION_RX, token_b64) + + def test_mirror_peer(self): + eq([], list(self.rbd.mirror_peer_list(ioctx))) 
+ site_name = "test_site" + client_name = "test_client" + uuid = self.rbd.mirror_peer_add(ioctx, site_name, client_name, + direction=RBD_MIRROR_PEER_DIRECTION_RX_TX) + assert(uuid) + peer = { + 'uuid' : uuid, + 'direction': RBD_MIRROR_PEER_DIRECTION_RX_TX, + 'site_name' : site_name, + 'cluster_name' : site_name, + 'mirror_uuid': '', + 'client_name' : client_name, + } + eq([peer], list(self.rbd.mirror_peer_list(ioctx))) + cluster_name = "test_cluster1" + self.rbd.mirror_peer_set_cluster(ioctx, uuid, cluster_name) + client_name = "test_client1" + self.rbd.mirror_peer_set_client(ioctx, uuid, client_name) + peer = { + 'uuid' : uuid, + 'direction': RBD_MIRROR_PEER_DIRECTION_RX_TX, + 'site_name' : cluster_name, + 'cluster_name' : cluster_name, + 'mirror_uuid': '', + 'client_name' : client_name, + } + eq([peer], list(self.rbd.mirror_peer_list(ioctx))) + + attribs = { + RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST: 'host1', + RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY: 'abc' + } + self.rbd.mirror_peer_set_attributes(ioctx, uuid, attribs) + eq(attribs, self.rbd.mirror_peer_get_attributes(ioctx, uuid)) + + self.rbd.mirror_peer_remove(ioctx, uuid) + eq([], list(self.rbd.mirror_peer_list(ioctx))) + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, + RBD_FEATURE_JOURNALING]) + def test_mirror_image(self): + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) + self.image.mirror_image_disable(True) + info = self.image.mirror_image_get_info() + self.check_info(info, '', RBD_MIRROR_IMAGE_DISABLED, False) + + self.image.mirror_image_enable() + info = self.image.mirror_image_get_info() + global_id = info['global_id'] + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_POOL) + fail = False + try: + self.image.mirror_image_disable(True) + except InvalidArgument: + fail = True + eq(True, fail) # Fails because of mirror mode pool + + self.image.mirror_image_demote() + info = self.image.mirror_image_get_info() + 
self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, False) + + entries = dict(self.rbd.mirror_image_info_list(ioctx)) + info['mode'] = RBD_MIRROR_IMAGE_MODE_JOURNAL; + eq(info, entries[self.image.id()]) + + self.image.mirror_image_resync() + + self.image.mirror_image_promote(True) + info = self.image.mirror_image_get_info() + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + entries = dict(self.rbd.mirror_image_info_list(ioctx)) + info['mode'] = RBD_MIRROR_IMAGE_MODE_JOURNAL; + eq(info, entries[self.image.id()]) + + fail = False + try: + self.image.mirror_image_resync() + except InvalidArgument: + fail = True + eq(True, fail) # Fails because it is primary + + status = self.image.mirror_image_get_status() + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + info = status['info'] + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + @require_features([RBD_FEATURE_EXCLUSIVE_LOCK, + RBD_FEATURE_JOURNALING]) + def test_mirror_image_status(self): + info = self.image.mirror_image_get_info() + global_id = info['global_id'] + state = info['state'] + primary = info['primary'] + + status = self.image.mirror_image_get_status() + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + eq([], status['remote_statuses']) + info = status['info'] + self.check_info(info, global_id, state, primary) + + images = list(self.rbd.mirror_image_status_list(ioctx)) + eq(1, len(images)) + status = images[0] + eq(image_name, status['name']) + eq(False, status['up']) + eq(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, status['state']) + info = status['info'] + self.check_info(info, global_id, state) + + states = self.rbd.mirror_image_status_summary(ioctx) + eq([(MIRROR_IMAGE_STATUS_STATE_UNKNOWN, 1)], states) + + assert_raises(ImageNotFound, self.image.mirror_image_get_instance_id) + instance_ids = 
list(self.rbd.mirror_image_instance_id_list(ioctx)) + eq(0, len(instance_ids)) + + N = 65 + for i in range(N): + self.rbd.create(ioctx, image_name + str(i), IMG_SIZE, IMG_ORDER, + old_format=False, features=int(features)) + images = list(self.rbd.mirror_image_status_list(ioctx)) + eq(N + 1, len(images)) + for i in range(N): + self.rbd.remove(ioctx, image_name + str(i)) + + def test_mirror_image_create_snapshot(self): + assert_raises(InvalidArgument, self.image.mirror_image_create_snapshot) + + peer1_uuid = self.rbd.mirror_peer_add(ioctx, "cluster1", "client") + peer2_uuid = self.rbd.mirror_peer_add(ioctx, "cluster2", "client") + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) + self.image.mirror_image_disable(False) + self.image.mirror_image_enable(RBD_MIRROR_IMAGE_MODE_SNAPSHOT) + mode = self.image.mirror_image_get_mode() + eq(RBD_MIRROR_IMAGE_MODE_SNAPSHOT, mode) + + snaps = list(self.image.list_snaps()) + eq(1, len(snaps)) + snap = snaps[0] + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) + + info = self.image.mirror_image_get_info() + eq(True, info['primary']) + entries = dict( + self.rbd.mirror_image_info_list(ioctx, + RBD_MIRROR_IMAGE_MODE_SNAPSHOT)) + info['mode'] = RBD_MIRROR_IMAGE_MODE_SNAPSHOT; + eq(info, entries[self.image.id()]) + + snap_id = self.image.mirror_image_create_snapshot( + RBD_SNAP_CREATE_SKIP_QUIESCE) + + snaps = list(self.image.list_snaps()) + eq(2, len(snaps)) + snap = snaps[0] + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) + snap = snaps[1] + eq(snap['id'], snap_id) + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) + eq(sorted([peer1_uuid, peer2_uuid]), + sorted(snap['mirror']['mirror_peer_uuids'])) + + eq(RBD_SNAP_NAMESPACE_TYPE_MIRROR, + self.image.snap_get_namespace_type(snap_id)) + mirror_snap = 
self.image.snap_get_mirror_namespace(snap_id) + eq(mirror_snap, snap['mirror']) + + self.image.mirror_image_demote() + + assert_raises(InvalidArgument, self.image.mirror_image_create_snapshot) + + snaps = list(self.image.list_snaps()) + eq(3, len(snaps)) + snap = snaps[0] + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + snap = snaps[1] + eq(snap['id'], snap_id) + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + snap = snaps[2] + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY_DEMOTED, snap['mirror']['state']) + eq(sorted([peer1_uuid, peer2_uuid]), + sorted(snap['mirror']['mirror_peer_uuids'])) + + self.rbd.mirror_peer_remove(ioctx, peer1_uuid) + self.rbd.mirror_peer_remove(ioctx, peer2_uuid) + self.image.mirror_image_promote(False) + + def test_aio_mirror_image_create_snapshot(self): + peer_uuid = self.rbd.mirror_peer_add(ioctx, "cluster", "client") + self.rbd.mirror_mode_set(ioctx, RBD_MIRROR_MODE_IMAGE) + self.image.mirror_image_disable(False) + self.image.mirror_image_enable(RBD_MIRROR_IMAGE_MODE_SNAPSHOT) + + snaps = list(self.image.list_snaps()) + eq(1, len(snaps)) + snap = snaps[0] + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) + + # this is a list so that the local cb() can modify it + info = [None] + def cb(_, _info): + info[0] = _info + + comp = self.image.aio_mirror_image_get_info(cb) + comp.wait_for_complete_and_cb() + assert_not_equal(info[0], None) + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + info = info[0] + global_id = info['global_id'] + self.check_info(info, global_id, RBD_MIRROR_IMAGE_ENABLED, True) + + mode = [None] + def cb(_, _mode): + mode[0] = _mode + + comp = self.image.aio_mirror_image_get_mode(cb) + comp.wait_for_complete_and_cb() + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + eq(mode[0], RBD_MIRROR_IMAGE_MODE_SNAPSHOT) + + snap_id = [None] + def cb(_, _snap_id): + snap_id[0] 
= _snap_id + + comp = self.image.aio_mirror_image_create_snapshot(0, cb) + comp.wait_for_complete_and_cb() + assert_not_equal(snap_id[0], None) + eq(comp.get_return_value(), 0) + eq(sys.getrefcount(comp), 2) + + snaps = list(self.image.list_snaps()) + eq(2, len(snaps)) + snap = snaps[1] + eq(snap['id'], snap_id[0]) + eq(snap['namespace'], RBD_SNAP_NAMESPACE_TYPE_MIRROR) + eq(RBD_SNAP_MIRROR_STATE_PRIMARY, snap['mirror']['state']) + eq([peer_uuid], snap['mirror']['mirror_peer_uuids']) + + self.rbd.mirror_peer_remove(ioctx, peer_uuid) + +class TestTrash(object): + + def setUp(self): + global rados2 + rados2 = Rados(conffile='') + rados2.connect() + global ioctx2 + ioctx2 = rados2.open_ioctx(pool_name) + + def tearDown(self): + global ioctx2 + ioctx2.close() + global rados2 + rados2.shutdown() + + def test_move(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + RBD().trash_remove(ioctx, image_id, True) + + def test_purge(self): + create_image() + with Image(ioctx, image_name) as image: + image_name1 = image_name + image_id1 = image.id() + + create_image() + with Image(ioctx, image_name) as image: + image_name2 = image_name + image_id2 = image.id() + + RBD().trash_move(ioctx, image_name1, 0) + RBD().trash_move(ioctx, image_name2, 1000) + RBD().trash_purge(ioctx, datetime.now()) + + entries = list(RBD().trash_list(ioctx)) + eq([image_id2], [x['id'] for x in entries]) + RBD().trash_remove(ioctx, image_id2, True) + + def test_remove_denied(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + assert_raises(PermissionError, RBD().trash_remove, ioctx, image_id) + RBD().trash_remove(ioctx, image_id, True) + + def test_remove(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 0) + RBD().trash_remove(ioctx, image_id) + + def 
test_remove_with_progress(self): + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + return 0 + + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 0) + RBD().trash_remove(ioctx, image_id, on_progress=progress_cb) + eq(True, d['received_callback']) + + def test_get(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + + RBD().trash_move(ioctx, image_name, 1000) + + info = RBD().trash_get(ioctx, image_id) + eq(image_id, info['id']) + eq(image_name, info['name']) + eq('USER', info['source']) + assert(info['deferment_end_time'] > info['deletion_time']) + + RBD().trash_remove(ioctx, image_id, True) + + def test_list(self): + create_image() + with Image(ioctx, image_name) as image: + image_id1 = image.id() + image_name1 = image_name + RBD().trash_move(ioctx, image_name, 1000) + + create_image() + with Image(ioctx, image_name) as image: + image_id2 = image.id() + image_name2 = image_name + RBD().trash_move(ioctx, image_name, 1000) + + entries = list(RBD().trash_list(ioctx)) + for e in entries: + if e['id'] == image_id1: + eq(e['name'], image_name1) + elif e['id'] == image_id2: + eq(e['name'], image_name2) + else: + assert False + eq(e['source'], 'USER') + assert e['deferment_end_time'] > e['deletion_time'] + + RBD().trash_remove(ioctx, image_id1, True) + RBD().trash_remove(ioctx, image_id2, True) + + def test_restore(self): + create_image() + with Image(ioctx, image_name) as image: + image_id = image.id() + RBD().trash_move(ioctx, image_name, 1000) + RBD().trash_restore(ioctx, image_id, image_name) + remove_image() + +def test_create_group(): + create_group() + remove_group() + +def test_rename_group(): + create_group() + if group_name is not None: + rename_group() + eq(["new" + group_name], RBD().group_list(ioctx)) + RBD().group_remove(ioctx, "new" + group_name) + else: + remove_group() + +def 
test_list_groups_empty(): + eq([], RBD().group_list(ioctx)) + +@with_setup(create_group, remove_group) +def test_list_groups(): + eq([group_name], RBD().group_list(ioctx)) + +@with_setup(create_group) +def test_list_groups_after_removed(): + remove_group() + eq([], RBD().group_list(ioctx)) + +class TestGroups(object): + + def setUp(self): + global snap_name + self.rbd = RBD() + create_image() + self.image_names = [image_name] + self.image = Image(ioctx, image_name) + + create_group() + snap_name = get_temp_snap_name() + self.group = Group(ioctx, group_name) + + def tearDown(self): + remove_group() + self.image = None + for name in self.image_names: + RBD().remove(ioctx, name) + + def test_group_image_add(self): + self.group.add_image(ioctx, image_name) + + def test_group_image_list_empty(self): + eq([], list(self.group.list_images())) + + def test_group_image_list(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + eq([image_name], [img['name'] for img in self.group.list_images()]) + + def test_group_image_list_move_to_trash(self): + eq([], list(self.group.list_images())) + with Image(ioctx, image_name) as image: + image_id = image.id() + self.group.add_image(ioctx, image_name) + eq([image_name], [img['name'] for img in self.group.list_images()]) + RBD().trash_move(ioctx, image_name, 0) + eq([], list(self.group.list_images())) + RBD().trash_restore(ioctx, image_id, image_name) + + def test_group_image_many_images(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + + for x in range(0, 20): + create_image() + self.image_names.append(image_name) + self.group.add_image(ioctx, image_name) + + self.image_names.sort() + answer = [img['name'] for img in self.group.list_images()] + answer.sort() + eq(self.image_names, answer) + + def test_group_image_remove(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + with Image(ioctx, image_name) as image: + 
# NOTE(review): fragment — continuation of TestGroups.test_group_image_remove();
# the enclosing 'with Image(...) as image:' is on the previous chunk line.
            # Membership sets the group operation-feature bit on the image.
            eq(RBD_OPERATION_FEATURE_GROUP,
               image.op_features() & RBD_OPERATION_FEATURE_GROUP)
            group = image.group()
            eq(group_name, group['name'])

        eq([image_name], [img['name'] for img in self.group.list_images()])
        self.group.remove_image(ioctx, image_name)
        eq([], list(self.group.list_images()))
        with Image(ioctx, image_name) as image:
            # The feature bit is cleared once the image leaves the group.
            eq(0, image.op_features() & RBD_OPERATION_FEATURE_GROUP)

    def test_group_snap(self):
        """Group snapshots appear under the group snap namespace."""
        global snap_name
        eq([], list(self.group.list_snaps()))
        self.group.create_snap(snap_name)
        eq([snap_name], [snap['name'] for snap in self.group.list_snaps()])

        # Any group-namespace snapshot on the image must reference this
        # group and group snapshot.
        for snap in self.image.list_snaps():
            eq(rbd.RBD_SNAP_NAMESPACE_TYPE_GROUP, snap['namespace'])
            info = snap['group']
            eq(group_name, info['group_name'])
            eq(snap_name, info['group_snap_name'])

        self.group.remove_snap(snap_name)
        eq([], list(self.group.list_snaps()))

    def test_group_snap_flags(self):
        """create_snap() accepts each quiesce flag alone but not both."""
        global snap_name
        eq([], list(self.group.list_snaps()))

        self.group.create_snap(snap_name, 0)
        eq([snap_name], [snap['name'] for snap in self.group.list_snaps()])
        self.group.remove_snap(snap_name)

        self.group.create_snap(snap_name, RBD_SNAP_CREATE_SKIP_QUIESCE)
        eq([snap_name], [snap['name'] for snap in self.group.list_snaps()])
        self.group.remove_snap(snap_name)

        self.group.create_snap(snap_name, RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR)
        eq([snap_name], [snap['name'] for snap in self.group.list_snaps()])
        self.group.remove_snap(snap_name)

        # The two quiesce flags are mutually exclusive.
        assert_raises(InvalidArgument, self.group.create_snap, snap_name,
                      RBD_SNAP_CREATE_SKIP_QUIESCE |
                      RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR)
        eq([], list(self.group.list_snaps()))

    def test_group_snap_list_many(self):
        global snap_name
        eq([], list(self.group.list_snaps()))
        snap_names = []
        for x in range(0, 20):
            snap_names.append(snap_name)
            self.group.create_snap(snap_name)
            snap_name = get_temp_snap_name()

        # Listing order is unspecified; compare sorted.
        snap_names.sort()
        answer = [snap['name'] for snap in self.group.list_snaps()]
        # NOTE(review): fragment — continues on the next chunk line.
answer.sort() + eq(snap_names, answer) + + def test_group_snap_namespace(self): + global snap_name + eq([], list(self.group.list_snaps())) + self.group.add_image(ioctx, image_name) + self.group.create_snap(snap_name) + eq(1, len([snap['name'] for snap in self.image.list_snaps()])) + self.group.remove_image(ioctx, image_name) + self.group.remove_snap(snap_name) + eq([], list(self.group.list_snaps())) + + def test_group_snap_rename(self): + global snap_name + new_snap_name = "new" + snap_name + + eq([], list(self.group.list_snaps())) + self.group.create_snap(snap_name) + eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) + self.group.rename_snap(snap_name, new_snap_name) + eq([new_snap_name], [snap['name'] for snap in self.group.list_snaps()]) + self.group.remove_snap(new_snap_name) + eq([], list(self.group.list_snaps())) + + def test_group_snap_rollback(self): + eq([], list(self.group.list_images())) + self.group.add_image(ioctx, image_name) + with Image(ioctx, image_name) as image: + image.write(b'\0' * 256, 0) + read = image.read(0, 256) + eq(read, b'\0' * 256) + + global snap_name + eq([], list(self.group.list_snaps())) + self.group.create_snap(snap_name) + eq([snap_name], [snap['name'] for snap in self.group.list_snaps()]) + + with Image(ioctx, image_name) as image: + data = rand_data(256) + image.write(data, 0) + read = image.read(0, 256) + eq(read, data) + + self.group.rollback_to_snap(snap_name) + with Image(ioctx, image_name) as image: + read = image.read(0, 256) + eq(read, b'\0' * 256) + + self.group.remove_image(ioctx, image_name) + eq([], list(self.group.list_images())) + self.group.remove_snap(snap_name) + eq([], list(self.group.list_snaps())) + +@with_setup(create_image, remove_image) +def test_rename(): + rbd = RBD() + image_name2 = get_temp_image_name() + +class TestMigration(object): + + def test_migration(self): + create_image() + RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63, + order=23, 
# NOTE(review): fragment — continuation of TestMigration.test_migration()'s
# migration_prepare() argument list from the previous chunk line.
                                stripe_unit=1<<23, stripe_count=1,
                                data_pool=None)

        # While prepared, status reports both names and the PREPARED state.
        status = RBD().migration_status(ioctx, image_name)
        eq(image_name, status['source_image_name'])
        eq(image_name, status['dest_image_name'])
        eq(RBD_IMAGE_MIGRATION_STATE_PREPARED, status['state'])

        with Image(ioctx, image_name) as image:
            source_spec = image.migration_source_spec()
            eq("native", source_spec["type"])

        RBD().migration_execute(ioctx, image_name)
        RBD().migration_commit(ioctx, image_name)
        remove_image()

    def test_migration_import(self):
        """Import an image snapshot via a 'native' migration source spec."""
        create_image()
        with Image(ioctx, image_name) as image:
            image_id = image.id()
            image.create_snap('snap')

        source_spec = json.dumps(
            {'type': 'native',
             'pool_id': ioctx.get_pool_id(),
             'pool_namespace': '',
             'image_name': image_name,
             'image_id': image_id,
             'snap_name': 'snap'})
        dst_image_name = get_temp_image_name()
        RBD().migration_prepare_import(source_spec, ioctx, dst_image_name,
                                       features=63, order=23, stripe_unit=1<<23,
                                       stripe_count=1, data_pool=None)

        # An import reports no source image name in this pool.
        status = RBD().migration_status(ioctx, dst_image_name)
        eq('', status['source_image_name'])
        eq(dst_image_name, status['dest_image_name'])
        eq(RBD_IMAGE_MIGRATION_STATE_PREPARED, status['state'])

        with Image(ioctx, dst_image_name) as image:
            source_spec = image.migration_source_spec()
            eq("native", source_spec["type"])

        RBD().migration_execute(ioctx, dst_image_name)
        RBD().migration_commit(ioctx, dst_image_name)

        # Both source and destination carry the snapshot; clean both up.
        with Image(ioctx, image_name) as image:
            image.remove_snap('snap')
        with Image(ioctx, dst_image_name) as image:
            image.remove_snap('snap')

        RBD().remove(ioctx, dst_image_name)
        RBD().remove(ioctx, image_name)

    def test_migration_with_progress(self):
        """migration_execute()/commit() report progress via the callback."""
        d = {'received_callback': False}
        def progress_cb(current, total):
            # Returning 0 tells librbd to continue the operation.
            d['received_callback'] = True
            return 0

        create_image()
        RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63,
                                order=23, stripe_unit=1<<23, stripe_count=1,
                                data_pool=None)
        # NOTE(review): fragment — the method body continues on the next chunk line.
# NOTE(review): fragment — tail of TestMigration.test_migration_with_progress().
        RBD().migration_execute(ioctx, image_name, on_progress=progress_cb)
        eq(True, d['received_callback'])
        d['received_callback'] = False

        RBD().migration_commit(ioctx, image_name, on_progress=progress_cb)
        eq(True, d['received_callback'])
        remove_image()

    def test_migrate_abort(self):
        """A prepared migration can be aborted, restoring the source image."""
        create_image()
        RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63,
                                order=23, stripe_unit=1<<23, stripe_count=1,
                                data_pool=None)
        RBD().migration_abort(ioctx, image_name)
        remove_image()

    def test_migrate_abort_with_progress(self):
        """migration_abort() reports progress through the callback."""
        d = {'received_callback': False}
        def progress_cb(current, total):
            d['received_callback'] = True
            return 0

        create_image()
        RBD().migration_prepare(ioctx, image_name, ioctx, image_name, features=63,
                                order=23, stripe_unit=1<<23, stripe_count=1,
                                data_pool=None)
        RBD().migration_abort(ioctx, image_name, on_progress=progress_cb)
        eq(True, d['received_callback'])
        remove_image()
diff --git a/src/test/pybind/test_rgwfs.py b/src/test/pybind/test_rgwfs.py
new file mode 100644
index 000000000..06a88d163
--- /dev/null
+++ b/src/test/pybind/test_rgwfs.py
@@ -0,0 +1,144 @@
# vim: expandtab smarttab shiftwidth=4 softtabstop=4
from nose.tools import assert_raises, assert_equal, with_setup
import rgw as librgwfs

# Module-level fixtures shared by every test: the librgwfs client plus the
# mount root handle and the handle of the "bucket" directory worked in.
rgwfs = None
root_handler = None
root_dir_handler = None


def setup_module():
    global rgwfs
    global root_handler
    rgwfs = librgwfs.LibRGWFS("testid", "", "")
    root_handler = rgwfs.mount()


def teardown_module():
    global rgwfs
    rgwfs.shutdown()


def setup_test():
    """Per-test setup: open (or create) the test bucket and empty it."""
    global root_dir_handler

    names = []

    try:
        root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0)
    except Exception:
        # First run: the bucket does not exist yet.
        root_dir_handler = rgwfs.mkdir(root_handler, b"bucket", 0)

    def cb(name, offset, flags):
        names.append(name)
    # Collect every entry, then unlink them so each test starts clean.
    rgwfs.readdir(root_dir_handler, cb, 0, 0)
    for name in names:
        rgwfs.unlink(root_dir_handler, name, 0)


@with_setup(setup_test)
def test_version():
    # Smoke test: version() must not raise.
    rgwfs.version()
@with_setup(setup_test)
def test_fstat():
    # A stat tuple has 13 fields for directories and regular files alike.
    dir_stat = rgwfs.fstat(root_dir_handler)
    assert len(dir_stat) == 13
    fh = rgwfs.create(root_dir_handler, b'file-1', 0)
    file_stat = rgwfs.fstat(fh)
    assert len(file_stat) == 13
    rgwfs.close(fh)


@with_setup(setup_test)
def test_statfs():
    # statfs() returns an 11-field filesystem summary.
    fs_stat = rgwfs.statfs()
    assert len(fs_stat) == 11


@with_setup(setup_test)
def test_fsync():
    # Write, sync without and then with the flush flag, and close.
    fh = rgwfs.create(root_dir_handler, b'file-1', 0)
    rgwfs.write(fh, 0, b"asdf")
    rgwfs.fsync(fh, 0)
    rgwfs.write(fh, 4, b"qwer")
    rgwfs.fsync(fh, 1)
    rgwfs.close(fh)


@with_setup(setup_test)
def test_directory():
    # Create a directory, release its handle, then unlink it.
    subdir = rgwfs.mkdir(root_dir_handler, b"temp-directory", 0)
    rgwfs.close(subdir)
    rgwfs.unlink(root_dir_handler, b"temp-directory")


@with_setup(setup_test)
def test_walk_dir():
    # Every directory created must show up when walking the bucket.
    dir_names = [b"dir-1", b"dir-2", b"dir-3"]
    dir_handles = [rgwfs.mkdir(root_dir_handler, name, 0) for name in dir_names]

    seen = []

    def record(name, offset):
        seen.append((name, offset))

    offset, eof = rgwfs.readdir(root_dir_handler, record, 0)

    for handle in dir_handles:
        rgwfs.close(handle)

    for name, _ in seen:
        assert name in dir_names
        rgwfs.unlink(root_dir_handler, name)


@with_setup(setup_test)
def test_rename():
    # Rename a -> b within the bucket; b must be openable afterwards.
    fh = rgwfs.create(root_dir_handler, b"a", 0)
    rgwfs.close(fh)
    rgwfs.rename(root_dir_handler, b"a", root_dir_handler, b"b")
    fh = rgwfs.open(root_dir_handler, b"b", 0)
    rgwfs.fstat(fh)
    rgwfs.close(fh)
    rgwfs.unlink(root_dir_handler, b"b")


@with_setup(setup_test)
def test_open():
    # Opening a missing file raises ObjectNotFound (the original test checks
    # this twice; reason undocumented — kept as-is).
    for _ in range(2):
        assert_raises(librgwfs.ObjectNotFound, rgwfs.open,
                      root_dir_handler, b'file-1', 0)
    # Create, write, and read back.
    fh = rgwfs.create(root_dir_handler, b'file-1', 0)
    rgwfs.write(fh, 0, b"asdf")
    rgwfs.close(fh)
    fh = rgwfs.open(root_dir_handler, b'file-1', 0)
    assert_equal(rgwfs.read(fh, 0, 4), b"asdf")
    rgwfs.close(fh)
    # Overwrite with longer data, then read at an offset and at the start.
    fh = rgwfs.open(root_dir_handler, b'file-1', 0)
    rgwfs.write(fh, 0, b"aaaazxcv")
    rgwfs.close(fh)
    fh = rgwfs.open(root_dir_handler, b'file-1', 0)
    assert_equal(rgwfs.read(fh, 4, 4), b"zxcv")
    rgwfs.close(fh)
    fh = rgwfs.open(root_dir_handler, b'file-1', 0)
    assert_equal(rgwfs.read(fh, 0, 4), b"aaaa")
    rgwfs.close(fh)
    rgwfs.unlink(root_dir_handler, b"file-1")


@with_setup(setup_test)
def test_mount_unmount():
    global root_handler
    global root_dir_handler
    # Exercise a full unmount/remount cycle and confirm the FS still works.
    test_directory()
    rgwfs.close(root_dir_handler)
    rgwfs.close(root_handler)
    rgwfs.unmount()
    root_handler = rgwfs.mount()
    root_dir_handler = rgwfs.opendir(root_handler, b"bucket", 0)
    test_open()