author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /qa/workunits
parent    Initial commit. (diff)
download  ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
          ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip

Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits')
-rw-r--r--  qa/workunits/Makefile | 4
-rwxr-xr-x  qa/workunits/caps/mon_commands.sh | 25
-rwxr-xr-x  qa/workunits/ceph-helpers-root.sh | 129
-rwxr-xr-x  qa/workunits/ceph-tests/ceph-admin-commands.sh | 10
-rwxr-xr-x  qa/workunits/cephadm/create_iscsi_disks.sh | 36
-rwxr-xr-x  qa/workunits/cephadm/test_adoption.sh | 60
-rwxr-xr-x  qa/workunits/cephadm/test_cephadm.sh | 474
-rwxr-xr-x  qa/workunits/cephadm/test_dashboard_e2e.sh | 107
-rwxr-xr-x  qa/workunits/cephadm/test_iscsi_etc_hosts.sh | 21
-rwxr-xr-x  qa/workunits/cephadm/test_iscsi_pids_limit.sh | 29
-rwxr-xr-x  qa/workunits/cephadm/test_repos.sh | 45
-rwxr-xr-x  qa/workunits/cephtool/test.sh | 2991
-rwxr-xr-x  qa/workunits/cephtool/test_daemon.sh | 43
-rwxr-xr-x  qa/workunits/cephtool/test_kvstore_tool.sh | 71
-rwxr-xr-x  qa/workunits/client/test.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_2pc_queue.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_cas.sh | 6
-rwxr-xr-x  qa/workunits/cls/test_cls_cmpomap.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_hello.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_journal.sh | 6
-rwxr-xr-x  qa/workunits/cls/test_cls_lock.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_log.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_numops.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_rbd.sh | 6
-rwxr-xr-x  qa/workunits/cls/test_cls_refcount.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_rgw.sh | 8
-rwxr-xr-x  qa/workunits/cls/test_cls_rgw_gc.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_rgw_stats.sh | 5
-rwxr-xr-x  qa/workunits/cls/test_cls_sdk.sh | 5
-rw-r--r--  qa/workunits/direct_io/.gitignore | 3
-rw-r--r--  qa/workunits/direct_io/Makefile | 11
-rwxr-xr-x  qa/workunits/direct_io/big.sh | 6
-rw-r--r--  qa/workunits/direct_io/direct_io_test.c | 312
-rwxr-xr-x  qa/workunits/direct_io/misc.sh | 16
-rw-r--r--  qa/workunits/direct_io/test_short_dio_read.c | 57
-rw-r--r--  qa/workunits/direct_io/test_sync_io.c | 250
-rw-r--r--  qa/workunits/erasure-code/.gitignore | 2
-rw-r--r--  qa/workunits/erasure-code/bench.html | 34
-rwxr-xr-x  qa/workunits/erasure-code/bench.sh | 192
-rwxr-xr-x  qa/workunits/erasure-code/encode-decode-non-regression.sh | 40
-rw-r--r--  qa/workunits/erasure-code/examples.css | 97
-rw-r--r--  qa/workunits/erasure-code/jquery.flot.categories.js | 190
-rw-r--r--  qa/workunits/erasure-code/jquery.flot.js | 3168
-rw-r--r--  qa/workunits/erasure-code/jquery.js | 9472
-rw-r--r--  qa/workunits/erasure-code/plot.js | 82
-rw-r--r--  qa/workunits/false.sh | 3
-rw-r--r--  qa/workunits/fs/.gitignore | 1
-rw-r--r--  qa/workunits/fs/Makefile | 11
-rwxr-xr-x  qa/workunits/fs/cephfs_mirror_ha_gen.sh | 69
-rwxr-xr-x  qa/workunits/fs/cephfs_mirror_ha_verify.sh | 40
-rw-r--r--  qa/workunits/fs/cephfs_mirror_helpers.sh | 66
-rwxr-xr-x  qa/workunits/fs/damage/test-first-damage.sh | 194
-rwxr-xr-x  qa/workunits/fs/fscrypt.sh | 119
-rwxr-xr-x  qa/workunits/fs/full/subvolume_clone.sh | 114
-rwxr-xr-x  qa/workunits/fs/full/subvolume_rm.sh | 72
-rwxr-xr-x  qa/workunits/fs/full/subvolume_snapshot_rm.sh | 86
-rwxr-xr-x  qa/workunits/fs/maxentries/maxentries.sh | 155
-rwxr-xr-x  qa/workunits/fs/misc/acl.sh | 50
-rwxr-xr-x  qa/workunits/fs/misc/chmod.sh | 60
-rwxr-xr-x  qa/workunits/fs/misc/dac_override.sh | 19
-rwxr-xr-x  qa/workunits/fs/misc/direct_io.py | 42
-rwxr-xr-x  qa/workunits/fs/misc/dirfrag.sh | 52
-rwxr-xr-x  qa/workunits/fs/misc/filelock_deadlock.py | 72
-rwxr-xr-x  qa/workunits/fs/misc/filelock_interrupt.py | 94
-rwxr-xr-x  qa/workunits/fs/misc/i_complete_vs_rename.sh | 31
-rwxr-xr-x  qa/workunits/fs/misc/layout_vxattrs.sh | 115
-rwxr-xr-x  qa/workunits/fs/misc/mkpool_layout_vxattrs.sh | 15
-rwxr-xr-x  qa/workunits/fs/misc/multiple_rsync.sh | 25
-rwxr-xr-x  qa/workunits/fs/misc/rstats.sh | 80
-rwxr-xr-x  qa/workunits/fs/misc/trivial_sync.sh | 7
-rwxr-xr-x  qa/workunits/fs/misc/xattrs.sh | 14
-rwxr-xr-x  qa/workunits/fs/multiclient_sync_read_eof.py | 42
-rwxr-xr-x  qa/workunits/fs/norstats/kernel_untar_tar.sh | 26
-rwxr-xr-x  qa/workunits/fs/quota/quota.sh | 128
-rwxr-xr-x  qa/workunits/fs/snap-hierarchy.sh | 24
-rwxr-xr-x  qa/workunits/fs/snaps/snap-rm-diff.sh | 10
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-1.sh | 29
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-2.sh | 59
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-authwb.sh | 12
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-capwb.sh | 33
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-dir-rename.sh | 17
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-double-null.sh | 23
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-estale.sh | 13
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-git-ceph.sh | 52
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-hardlink.sh | 25
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-intodir.sh | 22
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh | 42
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-name-limits.sh | 27
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-parents.sh | 39
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-realm-split.sh | 31
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-snap-rename.sh | 33
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh | 24
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-upchildrealms.sh | 28
-rwxr-xr-x  qa/workunits/fs/snaps/snaptest-xattrwb.sh | 29
-rwxr-xr-x  qa/workunits/fs/snaps/untar_snap_rm.sh | 18
-rw-r--r--  qa/workunits/fs/test_o_trunc.c | 45
-rwxr-xr-x  qa/workunits/fs/test_o_trunc.sh | 7
-rwxr-xr-x  qa/workunits/fs/test_python.sh | 6
-rwxr-xr-x  qa/workunits/hadoop/repl.sh | 42
-rwxr-xr-x  qa/workunits/hadoop/terasort.sh | 76
-rwxr-xr-x  qa/workunits/hadoop/wordcount.sh | 35
-rwxr-xr-x  qa/workunits/kernel_untar_build.sh | 20
-rwxr-xr-x  qa/workunits/libcephfs/test.sh | 10
-rwxr-xr-x  qa/workunits/mgr/test_localpool.sh | 21
-rwxr-xr-x  qa/workunits/mgr/test_per_module_finisher.sh | 38
-rwxr-xr-x  qa/workunits/mon/auth_caps.sh | 130
-rwxr-xr-x  qa/workunits/mon/auth_key_rotation.sh | 58
-rw-r--r--  qa/workunits/mon/caps.py | 359
-rwxr-xr-x  qa/workunits/mon/caps.sh | 90
-rwxr-xr-x  qa/workunits/mon/config.sh | 136
-rwxr-xr-x  qa/workunits/mon/crush_ops.sh | 237
-rwxr-xr-x  qa/workunits/mon/osd.sh | 24
-rwxr-xr-x  qa/workunits/mon/pg_autoscaler.sh | 156
-rwxr-xr-x  qa/workunits/mon/ping.py | 106
-rwxr-xr-x  qa/workunits/mon/pool_ops.sh | 104
-rwxr-xr-x  qa/workunits/mon/rbd_snaps_ops.sh | 61
-rwxr-xr-x  qa/workunits/mon/test_config_key_caps.sh | 201
-rwxr-xr-x  qa/workunits/mon/test_mon_config_key.py | 463
-rwxr-xr-x  qa/workunits/mon/test_mon_osdmap_prune.sh | 205
-rwxr-xr-x  qa/workunits/mon/test_noautoscale_flag.sh | 104
-rwxr-xr-x  qa/workunits/objectstore/test_fuse.sh | 129
-rwxr-xr-x  qa/workunits/osdc/stress_objectcacher.sh | 28
-rwxr-xr-x  qa/workunits/post-file.sh | 8
-rwxr-xr-x  qa/workunits/rados/clone.sh | 13
-rwxr-xr-x  qa/workunits/rados/load-gen-big.sh | 10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix-small-long.sh | 10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix-small.sh | 10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix.sh | 10
-rwxr-xr-x  qa/workunits/rados/load-gen-mostlyread.sh | 10
-rwxr-xr-x  qa/workunits/rados/stress_watch.sh | 7
-rwxr-xr-x  qa/workunits/rados/test.sh | 62
-rwxr-xr-x  qa/workunits/rados/test_alloc_hint.sh | 177
-rwxr-xr-x  qa/workunits/rados/test_cache_pool.sh | 170
-rwxr-xr-x  qa/workunits/rados/test_crash.sh | 44
-rwxr-xr-x  qa/workunits/rados/test_crushdiff.sh | 103
-rwxr-xr-x  qa/workunits/rados/test_dedup_tool.sh | 458
-rwxr-xr-x  qa/workunits/rados/test_envlibrados_for_rocksdb.sh | 97
-rwxr-xr-x  qa/workunits/rados/test_hang.sh | 8
-rwxr-xr-x  qa/workunits/rados/test_health_warnings.sh | 76
-rwxr-xr-x  qa/workunits/rados/test_large_omap_detection.py | 134
-rwxr-xr-x  qa/workunits/rados/test_libcephsqlite.sh | 136
-rwxr-xr-x  qa/workunits/rados/test_librados_build.sh | 87
-rwxr-xr-x  qa/workunits/rados/test_pool_access.sh | 108
-rwxr-xr-x  qa/workunits/rados/test_pool_quota.sh | 68
-rwxr-xr-x  qa/workunits/rados/test_python.sh | 5
-rwxr-xr-x  qa/workunits/rados/test_rados_timeouts.sh | 48
-rwxr-xr-x  qa/workunits/rados/test_rados_tool.sh | 924
-rwxr-xr-x  qa/workunits/rados/version_number_sanity.sh | 30
-rwxr-xr-x  qa/workunits/rbd/cli_generic.sh | 1715
-rwxr-xr-x  qa/workunits/rbd/cli_migration.sh | 357
-rwxr-xr-x  qa/workunits/rbd/concurrent.sh | 375
-rwxr-xr-x  qa/workunits/rbd/crimson/test_crimson_librbd.sh | 35
-rwxr-xr-x  qa/workunits/rbd/diff.sh | 53
-rwxr-xr-x  qa/workunits/rbd/diff_continuous.sh | 106
-rwxr-xr-x  qa/workunits/rbd/huge-tickets.sh | 41
-rwxr-xr-x  qa/workunits/rbd/image_read.sh | 680
-rwxr-xr-x  qa/workunits/rbd/import_export.sh | 259
-rwxr-xr-x  qa/workunits/rbd/issue-20295.sh | 18
-rwxr-xr-x  qa/workunits/rbd/journal.sh | 326
-rwxr-xr-x  qa/workunits/rbd/kernel.sh | 100
-rwxr-xr-x  qa/workunits/rbd/krbd_data_pool.sh | 206
-rwxr-xr-x  qa/workunits/rbd/krbd_exclusive_option.sh | 233
-rwxr-xr-x  qa/workunits/rbd/krbd_fallocate.sh | 151
-rwxr-xr-x  qa/workunits/rbd/krbd_huge_osdmap.sh | 51
-rwxr-xr-x  qa/workunits/rbd/krbd_latest_osdmap_on_map.sh | 30
-rwxr-xr-x  qa/workunits/rbd/krbd_namespaces.sh | 116
-rwxr-xr-x  qa/workunits/rbd/krbd_rxbounce.sh | 103
-rwxr-xr-x  qa/workunits/rbd/krbd_stable_writes.sh | 141
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_enumerate.sh | 66
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_netlink_enobufs.sh | 24
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_netns.sh | 86
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_symlinks.sh | 116
-rwxr-xr-x  qa/workunits/rbd/krbd_wac.sh | 40
-rwxr-xr-x  qa/workunits/rbd/krbd_watch_errors.sh | 53
-rwxr-xr-x  qa/workunits/rbd/luks-encryption.sh | 217
-rwxr-xr-x  qa/workunits/rbd/map-snapshot-io.sh | 17
-rwxr-xr-x  qa/workunits/rbd/map-unmap.sh | 45
-rwxr-xr-x  qa/workunits/rbd/merge_diff.sh | 477
-rwxr-xr-x  qa/workunits/rbd/notify_master.sh | 5
-rwxr-xr-x  qa/workunits/rbd/notify_slave.sh | 5
-rwxr-xr-x  qa/workunits/rbd/permissions.sh | 269
-rwxr-xr-x  qa/workunits/rbd/qemu-iotests.sh | 47
-rwxr-xr-x  qa/workunits/rbd/qemu_dynamic_features.sh | 46
-rwxr-xr-x  qa/workunits/rbd/qemu_rebuild_object_map.sh | 37
-rwxr-xr-x  qa/workunits/rbd/qos.sh | 90
-rwxr-xr-x  qa/workunits/rbd/rbd-ggate.sh | 239
-rwxr-xr-x  qa/workunits/rbd/rbd-nbd.sh | 500
-rwxr-xr-x  qa/workunits/rbd/rbd_groups.sh | 258
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_bootstrap.sh | 58
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_fsx_compare.sh | 38
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_fsx_prepare.sh | 10
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_ha.sh | 210
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_helpers.sh | 1488
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_journal.sh | 614
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_snapshot.sh | 517
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_stress.sh | 221
-rwxr-xr-x  qa/workunits/rbd/rbd_support_module_recovery.sh | 77
-rwxr-xr-x  qa/workunits/rbd/read-flags.sh | 61
-rwxr-xr-x  qa/workunits/rbd/simple_big.sh | 12
-rwxr-xr-x  qa/workunits/rbd/test_admin_socket.sh | 151
-rwxr-xr-x  qa/workunits/rbd/test_librbd.sh | 9
-rwxr-xr-x  qa/workunits/rbd/test_librbd_python.sh | 12
-rwxr-xr-x  qa/workunits/rbd/test_lock_fence.sh | 48
-rwxr-xr-x  qa/workunits/rbd/test_rbd_mirror.sh | 9
-rwxr-xr-x  qa/workunits/rbd/test_rbd_tasks.sh | 276
-rwxr-xr-x  qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh | 34
-rwxr-xr-x  qa/workunits/rbd/verify_pool.sh | 27
-rwxr-xr-x  qa/workunits/rename/all.sh | 37
-rwxr-xr-x  qa/workunits/rename/dir_pri_nul.sh | 28
-rwxr-xr-x  qa/workunits/rename/dir_pri_pri.sh | 11
-rw-r--r--  qa/workunits/rename/plan.txt | 111
-rwxr-xr-x  qa/workunits/rename/prepare.sh | 21
-rwxr-xr-x  qa/workunits/rename/pri_nul.sh | 11
-rwxr-xr-x  qa/workunits/rename/pri_pri.sh | 12
-rwxr-xr-x  qa/workunits/rename/pri_rem.sh | 31
-rwxr-xr-x  qa/workunits/rename/rem_nul.sh | 29
-rwxr-xr-x  qa/workunits/rename/rem_pri.sh | 29
-rwxr-xr-x  qa/workunits/rename/rem_rem.sh | 61
-rwxr-xr-x  qa/workunits/rest/test-restful.sh | 10
-rwxr-xr-x  qa/workunits/rest/test_mgr_rest_api.py | 98
-rwxr-xr-x  qa/workunits/restart/test-backtraces.py | 250
-rwxr-xr-x  qa/workunits/rgw/common.py | 103
-rwxr-xr-x  qa/workunits/rgw/keystone-fake-server.py | 208
-rwxr-xr-x  qa/workunits/rgw/keystone-service-token.sh | 34
-rw-r--r--  qa/workunits/rgw/olh_noname_key | 1
-rw-r--r--  qa/workunits/rgw/olh_noname_val | bin 0 -> 71 bytes
-rwxr-xr-x  qa/workunits/rgw/run-bucket-check.sh | 19
-rwxr-xr-x  qa/workunits/rgw/run-datacache.sh | 19
-rwxr-xr-x  qa/workunits/rgw/run-reshard.sh | 23
-rwxr-xr-x  qa/workunits/rgw/run-s3tests.sh | 39
-rwxr-xr-x  qa/workunits/rgw/run-versioning.sh | 19
-rwxr-xr-x  qa/workunits/rgw/s3_bucket_quota.pl | 393
-rwxr-xr-x  qa/workunits/rgw/s3_multipart_upload.pl | 151
-rwxr-xr-x  qa/workunits/rgw/s3_user_quota.pl | 191
-rw-r--r--  qa/workunits/rgw/s3_utilities.pm | 233
-rwxr-xr-x  qa/workunits/rgw/test-keystone-service-token.py | 189
-rwxr-xr-x  qa/workunits/rgw/test_librgw_file.sh | 59
-rwxr-xr-x  qa/workunits/rgw/test_rgw_bucket_check.py | 194
-rwxr-xr-x  qa/workunits/rgw/test_rgw_datacache.py | 209
-rwxr-xr-x  qa/workunits/rgw/test_rgw_gc_log.sh | 5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_obj.sh | 5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_orphan_list.sh | 519
-rwxr-xr-x  qa/workunits/rgw/test_rgw_reshard.py | 311
-rwxr-xr-x  qa/workunits/rgw/test_rgw_s3_mp_reupload.py | 121
-rwxr-xr-x  qa/workunits/rgw/test_rgw_s3_mp_reupload.sh | 110
-rwxr-xr-x  qa/workunits/rgw/test_rgw_throttle.sh | 5
-rwxr-xr-x  qa/workunits/rgw/test_rgw_versioning.py | 110
-rwxr-xr-x  qa/workunits/suites/blogbench.sh | 15
-rwxr-xr-x  qa/workunits/suites/bonnie.sh | 11
-rwxr-xr-x  qa/workunits/suites/cephfs_journal_tool_smoke.sh | 92
-rwxr-xr-x  qa/workunits/suites/dbench-short.sh | 5
-rwxr-xr-x  qa/workunits/suites/dbench.sh | 6
-rwxr-xr-x  qa/workunits/suites/ffsb.sh | 31
-rwxr-xr-x  qa/workunits/suites/fio.sh | 42
-rwxr-xr-x  qa/workunits/suites/fsstress.sh | 17
-rwxr-xr-x  qa/workunits/suites/fsx.sh | 16
-rwxr-xr-x  qa/workunits/suites/fsync-tester.sh | 13
-rwxr-xr-x  qa/workunits/suites/iogen.sh | 17
-rwxr-xr-x  qa/workunits/suites/iozone-sync.sh | 22
-rwxr-xr-x  qa/workunits/suites/iozone.sh | 7
-rwxr-xr-x  qa/workunits/suites/pjd.sh | 17
-rw-r--r--  qa/workunits/suites/random_write.32.ffsb | 48
-rwxr-xr-x  qa/workunits/test_telemetry_pacific.sh | 23
-rwxr-xr-x  qa/workunits/test_telemetry_pacific_x.sh | 59
-rwxr-xr-x  qa/workunits/test_telemetry_quincy.sh | 44
-rwxr-xr-x  qa/workunits/test_telemetry_quincy_x.sh | 40
-rwxr-xr-x  qa/workunits/true.sh | 3
-rw-r--r--  qa/workunits/windows/libvirt_vm/autounattend.xml | 157
-rw-r--r--  qa/workunits/windows/libvirt_vm/first-logon.ps1 | 42
-rw-r--r--  qa/workunits/windows/libvirt_vm/setup.ps1 | 43
-rwxr-xr-x  qa/workunits/windows/libvirt_vm/setup.sh | 162
-rw-r--r--  qa/workunits/windows/libvirt_vm/utils.ps1 | 130
-rw-r--r--  qa/workunits/windows/run-tests.ps1 | 29
-rw-r--r--  qa/workunits/windows/run-tests.sh | 11
-rw-r--r--  qa/workunits/windows/test_rbd_wnbd.py | 919
275 files changed, 43789 insertions, 0 deletions
diff --git a/qa/workunits/Makefile b/qa/workunits/Makefile
new file mode 100644
index 000000000..f75f5dfd4
--- /dev/null
+++ b/qa/workunits/Makefile
@@ -0,0 +1,4 @@
+DIRS = direct_io fs
+
+all:
+ for d in $(DIRS) ; do ( cd $$d ; $(MAKE) all ) ; done
diff --git a/qa/workunits/caps/mon_commands.sh b/qa/workunits/caps/mon_commands.sh
new file mode 100755
index 000000000..5b5bce62e
--- /dev/null
+++ b/qa/workunits/caps/mon_commands.sh
@@ -0,0 +1,25 @@
+#!/bin/sh -ex
+
+ceph-authtool --create-keyring k --gen-key -p --name client.xx
+ceph auth add -i k client.xx mon "allow command foo; allow command bar *; allow command baz ...; allow command foo add * mon allow\\ rwx osd allow\\ *"
+
+( ceph -k k -n client.xx foo || true ) | grep 'unrecog'
+( ceph -k k -n client.xx foo ooo || true ) | grep 'Access denied'
+( ceph -k k -n client.xx fo || true ) | grep 'Access denied'
+( ceph -k k -n client.xx fooo || true ) | grep 'Access denied'
+
+( ceph -k k -n client.xx bar || true ) | grep 'Access denied'
+( ceph -k k -n client.xx bar a || true ) | grep 'unrecog'
+( ceph -k k -n client.xx bar a b c || true ) | grep 'Access denied'
+( ceph -k k -n client.xx ba || true ) | grep 'Access denied'
+( ceph -k k -n client.xx barr || true ) | grep 'Access denied'
+
+( ceph -k k -n client.xx baz || true ) | grep -v 'Access denied'
+( ceph -k k -n client.xx baz a || true ) | grep -v 'Access denied'
+( ceph -k k -n client.xx baz a b || true ) | grep -v 'Access denied'
+
+( ceph -k k -n client.xx foo add osd.1 -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'unrecog'
+( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'Access denied'
+( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow *' || true ) | grep 'Access denied'
+
+echo OK
\ No newline at end of file
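
In the assertions above, matching 'unrecog' means the cap check passed and the mon merely failed to parse the made-up command, while 'Access denied' means the cap itself did not match; together they probe exact-name, argument-count, and prefix matching of `allow command` caps. To review what was actually granted, a quick check like the following works (a minimal sketch; output formatting varies by release):

    # show the caps stored for the test entity
    ceph auth get client.xx
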
diff --git a/qa/workunits/ceph-helpers-root.sh b/qa/workunits/ceph-helpers-root.sh
new file mode 100755
index 000000000..5b5d2b409
--- /dev/null
+++ b/qa/workunits/ceph-helpers-root.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+#######################################################################
+
+function distro_id() {
+ source /etc/os-release
+ echo $ID
+}
+
+function distro_version() {
+ source /etc/os-release
+ echo $VERSION
+}
+
+function install() {
+ if [ $(distro_id) = "ubuntu" ]; then
+ sudo apt-get purge -y gcc
+ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+ fi
+ for package in "$@" ; do
+ install_one $package
+ done
+ if [ $(distro_id) = "ubuntu" ]; then
+ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11
+ sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 11
+ sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 11
+ sudo update-alternatives --set cc /usr/bin/gcc
+ sudo update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 11
+ sudo update-alternatives --set c++ /usr/bin/g++
+ fi
+}
+
+function install_one() {
+ case $(distro_id) in
+ ubuntu|debian|devuan|softiron)
+ sudo env DEBIAN_FRONTEND=noninteractive apt-get install -y "$@"
+ ;;
+ centos|fedora|rhel)
+ sudo yum install -y "$@"
+ ;;
+ opensuse*|suse|sles)
+ sudo zypper --non-interactive install "$@"
+ ;;
+ *)
+ echo "$(distro_id) is unknown, $@ will have to be installed manually."
+ ;;
+ esac
+}
+
+function install_pkg_on_ubuntu {
+ local project=$1
+ shift
+ local sha1=$1
+ shift
+ local codename=$1
+ shift
+ local force=$1
+ shift
+ local pkgs=$@
+ local missing_pkgs
+ if [ $force = "force" ]; then
+ missing_pkgs="$@"
+ else
+ for pkg in $pkgs; do
+ if ! dpkg -s $pkg &> /dev/null; then
+ missing_pkgs+=" $pkg"
+ fi
+ done
+ fi
+ if test -n "$missing_pkgs"; then
+ local shaman_url="https://shaman.ceph.com/api/repos/${project}/master/${sha1}/ubuntu/${codename}/repo"
+ sudo curl --silent --location $shaman_url --output /etc/apt/sources.list.d/$project.list
+ sudo env DEBIAN_FRONTEND=noninteractive apt-get update -y -o Acquire::Languages=none -o Acquire::Translation=none || true
+ sudo env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y $missing_pkgs
+ fi
+}
+
+#######################################################################
+
+function control_osd() {
+ local action=$1
+ local id=$2
+
+ sudo systemctl $action ceph-osd@$id
+
+ return 0
+}
+
+#######################################################################
+
+function pool_read_write() {
+ local size=${1:-1}
+ local dir=/tmp
+ local timeout=360
+ local test_pool=test_pool
+
+ ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
+ ceph osd pool create $test_pool 4 || return 1
+ ceph osd pool set $test_pool size $size --yes-i-really-mean-it || return 1
+ ceph osd pool set $test_pool min_size $size || return 1
+ ceph osd pool application enable $test_pool rados
+
+ echo FOO > $dir/BAR
+ timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1
+ timeout $timeout rados --pool $test_pool get BAR $dir/BAR.copy || return 1
+ diff $dir/BAR $dir/BAR.copy || return 1
+ ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
+}
+
+#######################################################################
+
+set -x
+
+"$@"
diff --git a/qa/workunits/ceph-tests/ceph-admin-commands.sh b/qa/workunits/ceph-tests/ceph-admin-commands.sh
new file mode 100755
index 000000000..4a9f0a66f
--- /dev/null
+++ b/qa/workunits/ceph-tests/ceph-admin-commands.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -ex
+
+ceph -s
+rados lspools
+rbd ls
+# check that the monitors work
+ceph osd set nodown
+ceph osd unset nodown
+
+exit 0
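
A hedged extension of the same smoke test: between the set and the unset, the flag should be visible in the osdmap summary. This check is not part of the original script:

    ceph osd set nodown
    ceph osd dump | grep -q nodown   # the flags line should now list nodown
    ceph osd unset nodown
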
diff --git a/qa/workunits/cephadm/create_iscsi_disks.sh b/qa/workunits/cephadm/create_iscsi_disks.sh
new file mode 100755
index 000000000..45319e3a1
--- /dev/null
+++ b/qa/workunits/cephadm/create_iscsi_disks.sh
@@ -0,0 +1,36 @@
+#!/bin/bash -ex
+# Create some file-backed iSCSI targets and attach them locally.
+
+# Exit if it's not CentOS
+if ! grep -q rhel /etc/*-release; then
+ echo "The script only supports CentOS."
+ exit 1
+fi
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+# 15 GiB
+DISK_FILE_SIZE="16106127360"
+
+$SUDO yum install -y targetcli iscsi-initiator-utils
+
+TARGET_NAME="iqn.2003-01.org.linux-iscsi.$(hostname).x8664:sn.foobar"
+$SUDO targetcli /iscsi create ${TARGET_NAME}
+$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals delete 0.0.0.0 3260
+$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals create 127.0.0.1 3260
+$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute generate_node_acls=1
+$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute demo_mode_write_protect=0
+
+for i in $(seq 3); do
+ # Create truncated files, and add them as luns
+ DISK_FILE="/tmp/disk${i}"
+ $SUDO truncate --size ${DISK_FILE_SIZE} ${DISK_FILE}
+
+ $SUDO targetcli /backstores/fileio create "lun${i}" ${DISK_FILE}
+ # Workaround for https://tracker.ceph.com/issues/47758
+ $SUDO targetcli "/backstores/fileio/lun${i}" set attribute optimal_sectors=0
+ $SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/luns create "/backstores/fileio/lun${i}"
+done
+
+$SUDO iscsiadm -m discovery -t sendtargets -p 127.0.0.1
+$SUDO iscsiadm -m node -p 127.0.0.1 -T ${TARGET_NAME} -l
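
The script only sets up the targets and logs in; a possible cleanup counterpart, assuming the same names and paths as above (a sketch, not part of the original script):

    $SUDO iscsiadm -m node -p 127.0.0.1 -T ${TARGET_NAME} -u    # log out of the session
    $SUDO targetcli /iscsi delete ${TARGET_NAME}                # remove the target
    for i in $(seq 3); do
        $SUDO targetcli /backstores/fileio delete "lun${i}"     # remove the backstores
        $SUDO rm -f "/tmp/disk${i}"                             # remove the backing files
    done
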
diff --git a/qa/workunits/cephadm/test_adoption.sh b/qa/workunits/cephadm/test_adoption.sh
new file mode 100755
index 000000000..68580eb62
--- /dev/null
+++ b/qa/workunits/cephadm/test_adoption.sh
@@ -0,0 +1,60 @@
+#!/bin/bash -ex
+
+SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
+CORPUS_COMMIT=9cd9ad020d93b0b420924fec55da307aff8bd422
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
+trap "$SUDO rm -rf $TMPDIR" EXIT
+
+if [ -z "$CEPHADM" ]; then
+ CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
+ ${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
+fi
+
+# at this point, we need $CEPHADM set
+if ! [ -x "$CEPHADM" ]; then
+ echo "cephadm not found. Please set \$CEPHADM"
+ exit 1
+fi
+
+# combine into a single var
+CEPHADM_BIN="$CEPHADM"
+CEPHADM="$SUDO $CEPHADM_BIN"
+
+## adopt
+CORPUS_GIT_SUBMOD="cephadm-adoption-corpus"
+GIT_CLONE_DIR=${TMPDIR}/${CORPUS_GIT_SUBMOD}
+git clone https://github.com/ceph/$CORPUS_GIT_SUBMOD $GIT_CLONE_DIR
+
+git -C $GIT_CLONE_DIR checkout $CORPUS_COMMIT
+CORPUS_DIR=${GIT_CLONE_DIR}/archive
+
+for subdir in `ls ${CORPUS_DIR}`; do
+ for tarfile in `ls ${CORPUS_DIR}/${subdir} | grep .tgz`; do
+ tarball=${CORPUS_DIR}/${subdir}/${tarfile}
+ FSID_LEGACY=`echo "$tarfile" | cut -c 1-36`
+ TMP_TAR_DIR=`mktemp -d -p $TMPDIR`
+ $SUDO tar xzvf $tarball -C $TMP_TAR_DIR
+ NAMES=$($CEPHADM ls --legacy-dir $TMP_TAR_DIR | jq -r '.[].name')
+ for name in $NAMES; do
+ $CEPHADM adopt \
+ --style legacy \
+ --legacy-dir $TMP_TAR_DIR \
+ --name $name
+ # validate after adopt
+ out=$($CEPHADM ls | jq '.[]' \
+ | jq 'select(.name == "'$name'")')
+ echo $out | jq -r '.style' | grep 'cephadm'
+ echo $out | jq -r '.fsid' | grep $FSID_LEGACY
+ done
+ # clean-up before next iter
+ $CEPHADM rm-cluster --fsid $FSID_LEGACY --force
+ $SUDO rm -rf $TMP_TAR_DIR
+ done
+done
+
+echo "OK"
diff --git a/qa/workunits/cephadm/test_cephadm.sh b/qa/workunits/cephadm/test_cephadm.sh
new file mode 100755
index 000000000..7d06a3326
--- /dev/null
+++ b/qa/workunits/cephadm/test_cephadm.sh
@@ -0,0 +1,474 @@
+#!/bin/bash -ex
+
+SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# cleanup during exit
+[ -z "$CLEANUP" ] && CLEANUP=true
+
+FSID='00000000-0000-0000-0000-0000deadbeef'
+
+# images that are used
+IMAGE_MAIN=${IMAGE_MAIN:-'quay.ceph.io/ceph-ci/ceph:main'}
+IMAGE_PACIFIC=${IMAGE_PACIFIC:-'quay.ceph.io/ceph-ci/ceph:pacific'}
+#IMAGE_OCTOPUS=${IMAGE_OCTOPUS:-'quay.ceph.io/ceph-ci/ceph:octopus'}
+IMAGE_DEFAULT=${IMAGE_MAIN}
+
+OSD_IMAGE_NAME="${SCRIPT_NAME%.*}_osd.img"
+OSD_IMAGE_SIZE='6G'
+OSD_TO_CREATE=2
+OSD_VG_NAME=${SCRIPT_NAME%.*}
+OSD_LV_NAME=${SCRIPT_NAME%.*}
+
+# TMPDIR for test data
+[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
+[ -d "$TMPDIR_TEST_MULTIPLE_MOUNTS" ] || TMPDIR_TEST_MULTIPLE_MOUNTS=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
+
+CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
+CEPHADM_SAMPLES_DIR=${CEPHADM_SRC_DIR}/samples
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+# If cephadm is already installed on the system, use that one and avoid
+# building one if we can.
+if [ -z "$CEPHADM" ] && command -v cephadm >/dev/null ; then
+ CEPHADM="$(command -v cephadm)"
+fi
+
+if [ -z "$CEPHADM" ]; then
+ CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
+ ${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
+ NO_BUILD_INFO=1
+fi
+
+# at this point, we need $CEPHADM set
+if ! [ -x "$CEPHADM" ]; then
+ echo "cephadm not found. Please set \$CEPHADM"
+ exit 1
+fi
+
+# add image to args
+CEPHADM_ARGS="$CEPHADM_ARGS --image $IMAGE_DEFAULT"
+
+# combine into a single var
+CEPHADM_BIN="$CEPHADM"
+CEPHADM="$SUDO $CEPHADM_BIN $CEPHADM_ARGS"
+
+# clean up previous run(s)?
+$CEPHADM rm-cluster --fsid $FSID --force
+$SUDO vgchange -an $OSD_VG_NAME || true
+loopdev=$($SUDO losetup -a | grep $(basename $OSD_IMAGE_NAME) | awk -F : '{print $1}')
+if ! [ "$loopdev" = "" ]; then
+ $SUDO losetup -d $loopdev
+fi
+
+function cleanup()
+{
+ if [ $CLEANUP = false ]; then
+ # preserve the TMPDIR state
+ echo "========================"
+ echo "!!! CLEANUP=$CLEANUP !!!"
+ echo
+ echo "TMPDIR=$TMPDIR"
+ echo "========================"
+ return
+ fi
+
+ dump_all_logs $FSID
+ rm -rf $TMPDIR
+}
+trap cleanup EXIT
+
+function expect_false()
+{
+ set -x
+ if eval "$@"; then return 1; else return 0; fi
+}
+
+# expect_return_code $expected_code $command ...
+function expect_return_code()
+{
+ set -x
+ local expected_code="$1"
+ shift
+ local command="$@"
+
+ set +e
+ eval "$command"
+ local return_code="$?"
+ set -e
+
+ if [ ! "$return_code" -eq "$expected_code" ]; then return 1; else return 0; fi
+}
+
+function is_available()
+{
+ local name="$1"
+ local condition="$2"
+ local tries="$3"
+
+ local num=0
+ while ! eval "$condition"; do
+ num=$(($num + 1))
+ if [ "$num" -ge $tries ]; then
+ echo "$name is not available"
+ false
+ fi
+ sleep 5
+ done
+
+ echo "$name is available"
+ true
+}
+
+function dump_log()
+{
+ local fsid="$1"
+ local name="$2"
+ local num_lines="$3"
+
+ if [ -z $num_lines ]; then
+ num_lines=100
+ fi
+
+ echo '-------------------------'
+ echo 'dump daemon log:' $name
+ echo '-------------------------'
+
+ $CEPHADM logs --fsid $fsid --name $name -- --no-pager -n $num_lines
+}
+
+function dump_all_logs()
+{
+ local fsid="$1"
+ local names=$($CEPHADM ls | jq -r '.[] | select(.fsid == "'$fsid'").name')
+
+ echo 'dumping logs for daemons: ' $names
+ for name in $names; do
+ dump_log $fsid $name
+ done
+}
+
+function nfs_stop()
+{
+ # stop the running nfs server
+ local units="nfs-server nfs-kernel-server"
+ for unit in $units; do
+ if systemctl --no-pager status $unit > /dev/null; then
+ $SUDO systemctl stop $unit
+ fi
+ done
+
+ # ensure the NFS port is no longer in use
+ expect_false "$SUDO ss -tlnp '( sport = :nfs )' | grep LISTEN"
+}
+
+## prepare + check host
+$SUDO $CEPHADM check-host
+
+## run a gather-facts (output to stdout)
+$SUDO $CEPHADM gather-facts
+
+## NOTE: as of around May 2023, the output of `cephadm version` is no longer
+## based on the version of the containers. The version reported is that of
+## the "binary" and is determined during the ceph build.
+## `cephadm version` should NOT require sudo/root.
+$CEPHADM_BIN version
+$CEPHADM_BIN version | grep 'cephadm version'
+# Typically cmake should be running the cephadm build script with CLI arguments
+# that embed version info into the "binary". If not using a cephadm build via
+# cmake you can set `NO_BUILD_INFO` to skip this check.
+if [ -z "$NO_BUILD_INFO" ]; then
+ $CEPHADM_BIN version | grep -v 'UNSET'
+ $CEPHADM_BIN version | grep -v 'UNKNOWN'
+fi
+
+
+## test shell before bootstrap, when crash dir isn't (yet) present on this host
+$CEPHADM shell --fsid $FSID -- ceph -v | grep 'ceph version'
+$CEPHADM shell --fsid $FSID -e FOO=BAR -- printenv | grep FOO=BAR
+
+# test stdin
+echo foo | $CEPHADM shell -- cat | grep -q foo
+
+# the shell commands a bit above this seem to cause the
+# /var/lib/ceph/<fsid> directory to be made. Since we now
+# check in bootstrap that there are no clusters with the same
+# fsid based on the directory existing, we need to make sure
+# this directory is gone before bootstrapping. We can
+# accomplish this with another rm-cluster
+$CEPHADM rm-cluster --fsid $FSID --force
+
+## bootstrap
+ORIG_CONFIG=`mktemp -p $TMPDIR`
+CONFIG=`mktemp -p $TMPDIR`
+MONCONFIG=`mktemp -p $TMPDIR`
+KEYRING=`mktemp -p $TMPDIR`
+IP=127.0.0.1
+cat <<EOF > $ORIG_CONFIG
+[global]
+ log to file = true
+ osd crush chooseleaf type = 0
+EOF
+$CEPHADM bootstrap \
+ --mon-id a \
+ --mgr-id x \
+ --mon-ip $IP \
+ --fsid $FSID \
+ --config $ORIG_CONFIG \
+ --output-config $CONFIG \
+ --output-keyring $KEYRING \
+ --output-pub-ssh-key $TMPDIR/ceph.pub \
+ --allow-overwrite \
+ --skip-mon-network \
+ --skip-monitoring-stack
+test -e $CONFIG
+test -e $KEYRING
+rm -f $ORIG_CONFIG
+
+$SUDO test -e /var/log/ceph/$FSID/ceph-mon.a.log
+$SUDO test -e /var/log/ceph/$FSID/ceph-mgr.x.log
+
+for u in ceph.target \
+ ceph-$FSID.target \
+ ceph-$FSID@mon.a \
+ ceph-$FSID@mgr.x; do
+ systemctl is-enabled $u
+ systemctl is-active $u
+done
+systemctl | grep system-ceph | grep -q .slice # naming is escaped and annoying
+
+# check ceph -s works (via shell w/ passed config/keyring)
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph -s | grep $FSID
+
+for t in mon mgr node-exporter prometheus grafana; do
+ $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph orch apply $t --unmanaged
+done
+
+## ls
+$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").fsid' \
+ | grep $FSID
+$CEPHADM ls | jq '.[]' | jq 'select(.name == "mgr.x").fsid' \
+ | grep $FSID
+
+# make sure the version is returned correctly
+$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").version' | grep -q \\.
+
+## deploy
+# add mon.b
+cp $CONFIG $MONCONFIG
+echo "public addrv = [v2:$IP:3301,v1:$IP:6790]" >> $MONCONFIG
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name mon.b \
+ --arg keyring /var/lib/ceph/$FSID/mon.a/keyring \
+ --arg config "$MONCONFIG" \
+ '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
+ $CEPHADM _orch deploy
+for u in ceph-$FSID@mon.b; do
+ systemctl is-enabled $u
+ systemctl is-active $u
+done
+cond="$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph mon stat | grep '2 mons'"
+is_available "mon.b" "$cond" 30
+
+# add mgr.y
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph auth get-or-create mgr.y \
+ mon 'allow profile mgr' \
+ osd 'allow *' \
+ mds 'allow *' > $TMPDIR/keyring.mgr.y
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name mgr.y \
+ --arg keyring $TMPDIR/keyring.mgr.y \
+ --arg config "$CONFIG" \
+ '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
+ $CEPHADM _orch deploy
+for u in ceph-$FSID@mgr.y; do
+ systemctl is-enabled $u
+ systemctl is-active $u
+done
+
+for f in `seq 1 30`; do
+ if $CEPHADM shell --fsid $FSID \
+ --config $CONFIG --keyring $KEYRING -- \
+ ceph -s -f json-pretty \
+ | jq '.mgrmap.num_standbys' | grep -q 1 ; then break; fi
+ sleep 1
+done
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph -s -f json-pretty \
+ | jq '.mgrmap.num_standbys' | grep -q 1
+
+# add osd.{1,2,..}
+dd if=/dev/zero of=$TMPDIR/$OSD_IMAGE_NAME bs=1 count=0 seek=$OSD_IMAGE_SIZE
+loop_dev=$($SUDO losetup -f)
+$SUDO vgremove -f $OSD_VG_NAME || true
+$SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME
+$SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev
+
+# osd bootstrap keyring
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd
+
+# create lvs first so ceph-volume doesn't overlap with lv creation
+for id in `seq 0 $((OSD_TO_CREATE-1))`; do
+ $SUDO lvcreate -l $((100/$OSD_TO_CREATE))%VG -n $OSD_LV_NAME.$id $OSD_VG_NAME
+done
+
+for id in `seq 0 $((OSD_TO_CREATE-1))`; do
+ device_name=/dev/$OSD_VG_NAME/$OSD_LV_NAME.$id
+ CEPH_VOLUME="$CEPHADM ceph-volume \
+ --fsid $FSID \
+ --config $CONFIG \
+ --keyring $TMPDIR/keyring.bootstrap.osd --"
+
+ # prepare the osd
+ $CEPH_VOLUME lvm prepare --bluestore --data $device_name --no-systemd
+ $CEPH_VOLUME lvm batch --no-auto $device_name --yes --no-systemd
+
+ # osd id and osd fsid
+ $CEPH_VOLUME lvm list --format json $device_name > $TMPDIR/osd.map
+ osd_id=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_id"? | select(.)')
+ osd_fsid=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_fsid"? | select(.)')
+
+ # deploy the osd
+ jq --null-input \
+ --arg fsid $FSID \
+ --arg name osd.$osd_id \
+ --arg keyring $TMPDIR/keyring.bootstrap.osd \
+ --arg config "$CONFIG" \
+ --arg osd_fsid $osd_fsid \
+ '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config, "osd_fsid": $osd_fsid}}' | \
+ $CEPHADM _orch deploy
+done
+
+# add node-exporter
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name node-exporter.a \
+ '{"fsid": $fsid, "name": $name}' | \
+ ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
+cond="curl 'http://localhost:9100' | grep -q 'Node Exporter'"
+is_available "node-exporter" "$cond" 10
+
+# add prometheus
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name prometheus.a \
+ --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/prometheus.json)" \
+ '{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
+ ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
+cond="curl 'localhost:9095/api/v1/query?query=up'"
+is_available "prometheus" "$cond" 10
+
+# add grafana
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name grafana.a \
+ --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/grafana.json)" \
+ '{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
+ ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
+cond="curl --insecure 'https://localhost:3000' | grep -q 'grafana'"
+is_available "grafana" "$cond" 50
+
+# add nfs-ganesha
+nfs_stop
+nfs_rados_pool=$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json | jq -r '.["pool"]')
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph osd pool create $nfs_rados_pool 64
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ rados --pool nfs-ganesha --namespace nfs-ns create conf-nfs.a
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph orch pause
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name nfs.a \
+ --arg keyring "$KEYRING" \
+ --arg config "$CONFIG" \
+ --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json)" \
+ '{"fsid": $fsid, "name": $name, "params": {"keyring": $keyring, "config": $config}, "config_blobs": $config_blobs}' | \
+ ${CEPHADM} _orch deploy
+cond="$SUDO ss -tlnp '( sport = :nfs )' | grep 'ganesha.nfsd'"
+is_available "nfs" "$cond" 10
+$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
+ ceph orch resume
+
+# add alertmanager via custom container
+alertmanager_image=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.image')
+tcp_ports=$(jq .ports ${CEPHADM_SAMPLES_DIR}/custom_container.json)
+jq --null-input \
+ --arg fsid $FSID \
+ --arg name container.alertmanager.a \
+ --arg keyring $TMPDIR/keyring.bootstrap.osd \
+ --arg config "$CONFIG" \
+ --arg image "$alertmanager_image" \
+ --argjson tcp_ports "${tcp_ports}" \
+ --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json)" \
+ '{"fsid": $fsid, "name": $name, "image": $image, "params": {"keyring": $keyring, "config": $config, "tcp_ports": $tcp_ports}, "config_blobs": $config_blobs}' | \
+ ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
+cond="$CEPHADM enter --fsid $FSID --name container.alertmanager.a -- test -f \
+ /etc/alertmanager/alertmanager.yml"
+is_available "alertmanager.yml" "$cond" 10
+cond="curl 'http://localhost:9093' | grep -q 'Alertmanager'"
+is_available "alertmanager" "$cond" 10
+
+## run
+# WRITE ME
+
+## unit
+$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
+$CEPHADM unit --fsid $FSID --name mon.a -- is-active
+expect_false $CEPHADM unit --fsid $FSID --name mon.xyz -- is-active
+$CEPHADM unit --fsid $FSID --name mon.a -- disable
+expect_false $CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
+$CEPHADM unit --fsid $FSID --name mon.a -- enable
+$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
+$CEPHADM unit --fsid $FSID --name mon.a -- status
+$CEPHADM unit --fsid $FSID --name mon.a -- stop
+expect_return_code 3 $CEPHADM unit --fsid $FSID --name mon.a -- status
+$CEPHADM unit --fsid $FSID --name mon.a -- start
+
+## shell
+$CEPHADM shell --fsid $FSID -- true
+$CEPHADM shell --fsid $FSID -- test -d /var/log/ceph
+expect_false $CEPHADM --timeout 10 shell --fsid $FSID -- sleep 60
+$CEPHADM --timeout 60 shell --fsid $FSID -- sleep 10
+$CEPHADM shell --fsid $FSID --mount $TMPDIR $TMPDIR_TEST_MULTIPLE_MOUNTS -- stat /mnt/$(basename $TMPDIR)
+
+## enter
+expect_false $CEPHADM enter
+$CEPHADM enter --fsid $FSID --name mon.a -- test -d /var/lib/ceph/mon/ceph-a
+$CEPHADM enter --fsid $FSID --name mgr.x -- test -d /var/lib/ceph/mgr/ceph-x
+$CEPHADM enter --fsid $FSID --name mon.a -- pidof ceph-mon
+expect_false $CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mon
+$CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mgr
+# this triggers a bug in older versions of podman, including 18.04's 1.6.2
+#expect_false $CEPHADM --timeout 5 enter --fsid $FSID --name mon.a -- sleep 30
+$CEPHADM --timeout 60 enter --fsid $FSID --name mon.a -- sleep 10
+
+## ceph-volume
+$CEPHADM ceph-volume --fsid $FSID -- inventory --format=json \
+ | jq '.[]'
+
+## preserve test state
+[ $CLEANUP = false ] && exit 0
+
+## rm-daemon
+# mon and osd require --force
+expect_false $CEPHADM rm-daemon --fsid $FSID --name mon.a
+# mgr does not
+$CEPHADM rm-daemon --fsid $FSID --name mgr.x
+
+expect_false $CEPHADM zap-osds --fsid $FSID
+$CEPHADM zap-osds --fsid $FSID --force
+
+## rm-cluster
+expect_false $CEPHADM rm-cluster --fsid $FSID --zap-osds
+$CEPHADM rm-cluster --fsid $FSID --force --zap-osds
+
+echo PASS
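
Every `_orch deploy` call above follows the same pattern: assemble a JSON document with jq and pipe it to cephadm on stdin. The shape this script uses looks roughly like the following (values are placeholders; only the fields exercised above are shown):

    {
      "fsid": "00000000-0000-0000-0000-0000deadbeef",
      "name": "node-exporter.a",
      "image": "<optional, overrides the default container image>",
      "params": {
        "keyring": "<path to keyring>",
        "config": "<path to ceph.conf>",
        "tcp_ports": [9100],
        "osd_fsid": "<osd deployments only>"
      },
      "config_blobs": {}
    }
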
diff --git a/qa/workunits/cephadm/test_dashboard_e2e.sh b/qa/workunits/cephadm/test_dashboard_e2e.sh
new file mode 100755
index 000000000..32e0bcc77
--- /dev/null
+++ b/qa/workunits/cephadm/test_dashboard_e2e.sh
@@ -0,0 +1,107 @@
+#!/bin/bash -ex
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DASHBOARD_FRONTEND_DIR=${SCRIPT_DIR}/../../../src/pybind/mgr/dashboard/frontend
+
+[ -z "$SUDO" ] && SUDO=sudo
+
+install_common () {
+ NODEJS_VERSION="16"
+ if grep -q debian /etc/*-release; then
+ $SUDO apt-get update
+ # https://github.com/nodesource/distributions#manual-installation
+ $SUDO apt-get install curl gpg
+ KEYRING=/usr/share/keyrings/nodesource.gpg
+ curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | gpg --dearmor | $SUDO tee "$KEYRING" >/dev/null
+ DISTRO="$(source /etc/lsb-release; echo $DISTRIB_CODENAME)"
+ VERSION="node_$NODEJS_VERSION.x"
+ echo "deb [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee /etc/apt/sources.list.d/nodesource.list
+ echo "deb-src [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee -a /etc/apt/sources.list.d/nodesource.list
+ $SUDO apt-get update
+ $SUDO apt-get install nodejs
+ elif grep -q rhel /etc/*-release; then
+ $SUDO yum module -y enable nodejs:$NODEJS_VERSION
+ $SUDO yum install -y jq npm
+ else
+ echo "Unsupported distribution."
+ exit 1
+ fi
+}
+
+install_chrome () {
+ if grep -q debian /etc/*-release; then
+ $SUDO bash -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list'
+ curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | $SUDO apt-key add -
+ $SUDO apt-get update
+ $SUDO apt-get install -y google-chrome-stable
+ $SUDO apt-get install -y xvfb
+ $SUDO rm /etc/apt/sources.list.d/google-chrome.list
+ elif grep -q rhel /etc/*-release; then
+ $SUDO dd of=/etc/yum.repos.d/google-chrome.repo status=none <<EOF
+[google-chrome]
+name=google-chrome
+baseurl=https://dl.google.com/linux/chrome/rpm/stable/\$basearch
+enabled=1
+gpgcheck=1
+gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
+EOF
+ $SUDO yum install -y google-chrome-stable
+ $SUDO rm /etc/yum.repos.d/google-chrome.repo
+ # Cypress dependencies
+ $SUDO yum install -y xorg-x11-server-Xvfb gtk2-devel gtk3-devel libnotify-devel GConf2 nss.x86_64 libXScrnSaver alsa-lib
+ else
+ echo "Unsupported distribution."
+ exit 1
+ fi
+}
+
+cypress_run () {
+ local specs="$1"
+ local timeout="$2"
+ local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs}"
+
+ if [ x"$timeout" != "x" ]; then
+ override_config="${override_config},defaultCommandTimeout=${timeout}"
+ fi
+ npx cypress run --browser chrome --headless --config "$override_config"
+}
+
+install_common
+install_chrome
+
+CYPRESS_BASE_URL=$(ceph mgr services | jq -r .dashboard)
+export CYPRESS_BASE_URL
+
+cd $DASHBOARD_FRONTEND_DIR
+
+# This is required for Cypress to understand typescript
+npm ci --unsafe-perm
+npx cypress verify
+npx cypress info
+
+# Take `orch device ls` and `orch ps` as ground truth.
+ceph orch device ls --refresh
+ceph orch ps --refresh
+sleep 10 # the previous call is asynchronous
+ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
+ceph orch ps --format=json | tee cypress/fixtures/orchestrator/services.json
+
+DASHBOARD_ADMIN_SECRET_FILE="/tmp/dashboard-admin-secret.txt"
+printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
+ceph dashboard ac-user-set-password admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" --force-password
+
+# Run Dashboard e2e tests.
+# These tests are designed with execution order in mind: since orchestrator operations
+# are likely to change cluster state, we can't just run tests in arbitrary order.
+# See /ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/ folder.
+find cypress # List all specs
+
+cypress_run "cypress/e2e/orchestrator/01-hosts.e2e-spec.ts"
+
+# Hosts are removed and added in the previous step. Do a refresh again.
+ceph orch device ls --refresh
+sleep 10
+ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
+
+cypress_run "cypress/e2e/orchestrator/03-inventory.e2e-spec.ts"
+cypress_run "cypress/e2e/orchestrator/04-osds.e2e-spec.ts" 300000
diff --git a/qa/workunits/cephadm/test_iscsi_etc_hosts.sh b/qa/workunits/cephadm/test_iscsi_etc_hosts.sh
new file mode 100755
index 000000000..adbc34a92
--- /dev/null
+++ b/qa/workunits/cephadm/test_iscsi_etc_hosts.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Checks that the container's and the host's /etc/hosts files match.
+# Necessary to avoid potential bugs caused by podman making edits
+# to the /etc/hosts file in the container.
+# Exits with code 1 if the host and iscsi container /etc/hosts do not match.
+
+set -ex
+
+ISCSI_DAEMON=$(sudo /home/ubuntu/cephtest/cephadm ls | jq -r '.[] | select(.service_name == "iscsi.foo") | .name')
+sudo /home/ubuntu/cephtest/cephadm enter --name $ISCSI_DAEMON -- cat /etc/hosts > iscsi_daemon_etc_hosts.txt
+if cmp --silent /etc/hosts iscsi_daemon_etc_hosts.txt; then
+ echo "Daemon and host /etc/hosts files successfully matched"
+else
+ echo "ERROR: /etc/hosts on host did not match /etc/hosts in the iscsi container!"
+ echo "Host /etc/hosts:"
+ cat /etc/hosts
+ echo "Iscsi container /etc/hosts:"
+ cat iscsi_daemon_etc_hosts.txt
+ exit 1
+fi
diff --git a/qa/workunits/cephadm/test_iscsi_pids_limit.sh b/qa/workunits/cephadm/test_iscsi_pids_limit.sh
new file mode 100755
index 000000000..bed4cc9e2
--- /dev/null
+++ b/qa/workunits/cephadm/test_iscsi_pids_limit.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Checks that the container's default pids-limit (4096) is removed and that
+# iSCSI containers continue to run.
+# Exits with code 1 on failure.
+
+set -ex
+
+ISCSI_CONT_IDS=$(sudo podman ps -qa --filter='name=iscsi')
+CONT_COUNT=$(echo ${ISCSI_CONT_IDS} | wc -w)
+test ${CONT_COUNT} -eq 2
+
+for i in ${ISCSI_CONT_IDS}
+do
+ test $(sudo podman exec ${i} cat /sys/fs/cgroup/pids/pids.max) == max
+done
+
+for i in ${ISCSI_CONT_IDS}
+do
+ sudo podman exec ${i} /bin/sh -c 'for j in {0..20000}; do sleep 300 & done'
+done
+
+for i in ${ISCSI_CONT_IDS}
+do
+ SLEEP_COUNT=$(sudo podman exec ${i} /bin/sh -c 'ps -ef | grep -c sleep')
+ test ${SLEEP_COUNT} -gt 20000
+done
+
+echo OK
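
/sys/fs/cgroup/pids/pids.max is the cgroup v1 path; on a host running cgroup v2 (the unified hierarchy) the controller file lives directly under the container's cgroup root. A hedged equivalent for v2, not part of the original test:

    for i in ${ISCSI_CONT_IDS}
    do
        test $(sudo podman exec ${i} cat /sys/fs/cgroup/pids.max) == max
    done
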
diff --git a/qa/workunits/cephadm/test_repos.sh b/qa/workunits/cephadm/test_repos.sh
new file mode 100755
index 000000000..221585fd0
--- /dev/null
+++ b/qa/workunits/cephadm/test_repos.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -ex
+
+SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
+SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]})
+CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
+
+[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
+trap "$SUDO rm -rf $TMPDIR" EXIT
+
+if [ -z "$CEPHADM" ]; then
+ CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
+ ${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
+fi
+
+# this is a pretty weak test, unfortunately, since the
+# package may also be in the base OS.
+function test_install_uninstall() {
+ ( sudo apt update && \
+ sudo apt -y install cephadm && \
+ sudo $CEPHADM install && \
+ sudo apt -y remove cephadm ) || \
+ ( sudo yum -y install cephadm && \
+ sudo $CEPHADM install && \
+ sudo yum -y remove cephadm ) || \
+ ( sudo dnf -y install cephadm && \
+ sudo $CEPHADM install && \
+ sudo dnf -y remove cephadm ) || \
+ ( sudo zypper -n install cephadm && \
+ sudo $CEPHADM install && \
+ sudo zypper -n remove cephadm )
+}
+
+sudo $CEPHADM -v add-repo --release octopus
+test_install_uninstall
+sudo $CEPHADM -v rm-repo
+
+sudo $CEPHADM -v add-repo --dev main
+test_install_uninstall
+sudo $CEPHADM -v rm-repo
+
+sudo $CEPHADM -v add-repo --release 15.2.7
+test_install_uninstall
+sudo $CEPHADM -v rm-repo
+
+echo OK.
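
The || chain in test_install_uninstall simply tries each package manager until one succeeds; an alternative sketch that detects the manager first (assumes exactly one of these tools is present):

    if command -v apt-get >/dev/null; then
        sudo apt-get update && sudo apt-get -y install cephadm && sudo apt-get -y remove cephadm
    elif command -v dnf >/dev/null; then
        sudo dnf -y install cephadm && sudo dnf -y remove cephadm
    elif command -v zypper >/dev/null; then
        sudo zypper -n install cephadm && sudo zypper -n remove cephadm
    fi
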
diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
new file mode 100755
index 000000000..aecfd56a9
--- /dev/null
+++ b/qa/workunits/cephtool/test.sh
@@ -0,0 +1,2991 @@
+#!/usr/bin/env bash
+# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
+# vim: ts=8 sw=8 ft=bash smarttab
+set -x
+
+source $(dirname $0)/../../standalone/ceph-helpers.sh
+
+set -e
+set -o functrace
+PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
+SUDO=${SUDO:-sudo}
+export CEPH_DEV=1
+
+function check_no_osd_down()
+{
+ ! ceph osd dump | grep ' down '
+}
+
+function wait_no_osd_down()
+{
+ max_run=300
+ for i in $(seq 1 $max_run) ; do
+ if ! check_no_osd_down ; then
+ echo "waiting for osd(s) to come back up ($i/$max_run)"
+ sleep 1
+ else
+ break
+ fi
+ done
+ check_no_osd_down
+}
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function expect_true()
+{
+ set -x
+ if ! "$@"; then return 1; else return 0; fi
+}
+
+TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
+trap "rm -fr $TEMP_DIR" 0
+
+TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
+
+#
+# retry_eagain max cmd args ...
+#
+# retry cmd args ... if it exits on error and its output contains the
+# string EAGAIN, at most $max times
+#
+function retry_eagain()
+{
+ local max=$1
+ shift
+ local status
+ local tmpfile=$TEMP_DIR/retry_eagain.$$
+ local count
+ for count in $(seq 1 $max) ; do
+ status=0
+ "$@" > $tmpfile 2>&1 || status=$?
+ if test $status = 0 ||
+ ! grep --quiet EAGAIN $tmpfile ; then
+ break
+ fi
+ sleep 1
+ done
+ if test $count = $max ; then
+ echo retried with non-zero exit status, $max times: "$@" >&2
+ fi
+ cat $tmpfile
+ rm $tmpfile
+ return $status
+}
+
+#
+# map_enxio_to_eagain cmd arg ...
+#
+# add EAGAIN to the output of cmd arg ... if the output contains
+# ENXIO.
+#
+function map_enxio_to_eagain()
+{
+ local status=0
+ local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
+
+ "$@" > $tmpfile 2>&1 || status=$?
+ if test $status != 0 &&
+ grep --quiet ENXIO $tmpfile ; then
+ echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
+ fi
+ cat $tmpfile
+ rm $tmpfile
+ return $status
+}
+
+function check_response()
+{
+ expected_string=$1
+ retcode=$2
+ expected_retcode=$3
+ if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
+ echo "return code invalid: got $retcode, expected $expected_retcode" >&2
+ exit 1
+ fi
+
+ if ! grep --quiet -- "$expected_string" $TMPFILE ; then
+ echo "Didn't find $expected_string in output" >&2
+ cat $TMPFILE >&2
+ exit 1
+ fi
+}
+
+function get_config_value_or_die()
+{
+ local target config_opt raw val
+
+ target=$1
+ config_opt=$2
+
+ raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
+ if [[ $? -ne 0 ]]; then
+ echo "error obtaining config opt '$config_opt' from '$target': $raw"
+ exit 1
+ fi
+
+ raw=`echo $raw | sed -e 's/[{} "]//g'`
+ val=`echo $raw | cut -f2 -d:`
+
+ echo "$val"
+ return 0
+}
+
+function expect_config_value()
+{
+ local target config_opt expected_val val
+ target=$1
+ config_opt=$2
+ expected_val=$3
+
+ val=$(get_config_value_or_die $target $config_opt)
+
+ if [[ "$val" != "$expected_val" ]]; then
+ echo "expected '$expected_val', got '$val'"
+ exit 1
+ fi
+}
+
+function ceph_watch_start()
+{
+ local watch_opt=--watch
+
+ if [ -n "$1" ]; then
+ watch_opt=--watch-$1
+ if [ -n "$2" ]; then
+ watch_opt+=" --watch-channel $2"
+ fi
+ fi
+
+ CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
+ ceph $watch_opt > $CEPH_WATCH_FILE &
+ CEPH_WATCH_PID=$!
+
+ # wait until the "ceph" client is connected and receiving
+ # log messages from monitor
+ for i in `seq 3`; do
+ grep -q "cluster" $CEPH_WATCH_FILE && break
+ sleep 1
+ done
+}
+
+function ceph_watch_wait()
+{
+ local regexp=$1
+ local timeout=30
+
+ if [ -n "$2" ]; then
+ timeout=$2
+ fi
+
+ for i in `seq ${timeout}`; do
+ grep -q "$regexp" $CEPH_WATCH_FILE && break
+ sleep 1
+ done
+
+ kill $CEPH_WATCH_PID
+
+ if ! grep "$regexp" $CEPH_WATCH_FILE; then
+ echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
+ cat $CEPH_WATCH_FILE >&2
+ return 1
+ fi
+}
+
+function test_mon_injectargs()
+{
+ ceph tell osd.0 injectargs --no-osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep false
+ ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 config get osd_op_history_duration | grep 500
+ ceph tell osd.0 injectargs --no-osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep false
+ ceph tell osd.0 injectargs -- --osd_enable_op_tracker
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
+ ceph tell osd.0 config get osd_enable_op_tracker | grep true
+ ceph tell osd.0 config get osd_op_history_duration | grep 600
+
+ ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
+ ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
+
+ ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
+ ceph tell osd.0 config get mon_probe_timeout | grep 2
+
+ ceph tell osd.0 injectargs -- '--mon-lease 6'
+ ceph tell osd.0 config get mon_lease | grep 6
+
+ # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
+ expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
+ check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
+
+ expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
+ ceph tell osd.0 injectargs -- '--osd_op_history_duration'
+
+}
+
+function test_mon_injectargs_SI()
+{
+ # Test SI units during injectargs and 'config set'
+ # We only aim at testing the units are parsed accordingly
+ # and don't intend to test whether the options being set
+ # actually expect SI units to be passed.
+ # Keep in mind that all integer based options that are not based on bytes
+ # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
+ # base 10.
+ initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
+ $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
+ $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
+ $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
+ $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
+ check_response "(22) Invalid argument"
+ # now test with injectargs
+ ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
+ ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
+ ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
+ expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
+ expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
+ expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
+ $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
+}
+
+function test_mon_injectargs_IEC()
+{
+ # Test IEC units during injectargs and 'config set'
+ # We only aim at testing the units are parsed accordingly
+ # and don't intend to test whether the options being set
+ # actually expect IEC units to be passed.
+ # Keep in mind that all integer based options that are based on bytes
+ # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
+ # unit modifiers (for backwards compatibility and convenience) and be parsed
+ # to base 2.
+ initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
+ expect_config_value "mon.a" "mon_data_size_warn" 15000000000
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
+ expect_config_value "mon.a" "mon_data_size_warn" 16106127360
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
+ expect_config_value "mon.a" "mon_data_size_warn" 17179869184
+ $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
+ check_response "(22) Invalid argument"
+ # now test with injectargs
+ ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
+ expect_config_value "mon.a" "mon_data_size_warn" 15000000000
+ ceph tell mon.a injectargs '--mon_data_size_warn 15G'
+ expect_config_value "mon.a" "mon_data_size_warn" 16106127360
+ ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
+ expect_config_value "mon.a" "mon_data_size_warn" 17179869184
+ expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
+ $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
+}
+
+function test_tiering_agent()
+{
+ local slow=slow_eviction
+ local fast=fast_eviction
+ ceph osd pool create $slow 1 1
+ ceph osd pool application enable $slow rados
+ ceph osd pool create $fast 1 1
+ ceph osd tier add $slow $fast
+ ceph osd tier cache-mode $fast writeback
+ ceph osd tier set-overlay $slow $fast
+ ceph osd pool set $fast hit_set_type bloom
+ rados -p $slow put obj1 /etc/group
+ ceph osd pool set $fast target_max_objects 1
+ ceph osd pool set $fast hit_set_count 1
+ ceph osd pool set $fast hit_set_period 5
+ # wait for the object to be evicted from the cache
+ local evicted
+ evicted=false
+ for i in `seq 1 300` ; do
+ if ! rados -p $fast ls | grep obj1 ; then
+ evicted=true
+ break
+ fi
+ sleep 1
+ done
+ $evicted # assert
+ # the object is proxy read and promoted to the cache
+ rados -p $slow get obj1 - >/dev/null
+ # wait for the promoted object to be evicted again
+ evicted=false
+ for i in `seq 1 300` ; do
+ if ! rados -p $fast ls | grep obj1 ; then
+ evicted=true
+ break
+ fi
+ sleep 1
+ done
+ $evicted # assert
+ ceph osd tier remove-overlay $slow
+ ceph osd tier remove $slow $fast
+ ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
+ ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
+}
+
+function test_tiering_1()
+{
+ # tiering
+ ceph osd pool create slow 2
+ ceph osd pool application enable slow rados
+ ceph osd pool create slow2 2
+ ceph osd pool application enable slow2 rados
+ ceph osd pool create cache 2
+ ceph osd pool create cache2 2
+ ceph osd tier add slow cache
+ ceph osd tier add slow cache2
+ expect_false ceph osd tier add slow2 cache
+ # application metadata should propagate to the tiers
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
+ ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
+ # forward is removed/deprecated
+ expect_false ceph osd tier cache-mode cache forward
+ expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
+ # test some state transitions
+ ceph osd tier cache-mode cache writeback
+ expect_false ceph osd tier cache-mode cache readonly
+ expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ ceph osd tier cache-mode cache proxy
+ ceph osd tier cache-mode cache readproxy
+ ceph osd tier cache-mode cache none
+ ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ ceph osd tier cache-mode cache none
+ ceph osd tier cache-mode cache writeback
+ ceph osd tier cache-mode cache proxy
+ ceph osd tier cache-mode cache writeback
+ expect_false ceph osd tier cache-mode cache none
+ expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ # test with dirty objects in the tier pool
+ # tier pool currently set to 'writeback'
+ rados -p cache put /etc/passwd /etc/passwd
+ flush_pg_stats
+ # 1 dirty object in pool 'cache'
+ ceph osd tier cache-mode cache proxy
+ expect_false ceph osd tier cache-mode cache none
+ expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ ceph osd tier cache-mode cache writeback
+ # remove object from tier pool
+ rados -p cache rm /etc/passwd
+ rados -p cache cache-flush-evict-all
+ flush_pg_stats
+ # no dirty objects in pool 'cache'
+ ceph osd tier cache-mode cache proxy
+ ceph osd tier cache-mode cache none
+ ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
+ TRIES=0
+ while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
+ do
+ grep 'currently creating pgs' $TMPFILE
+ TRIES=$(( $TRIES + 1 ))
+ test $TRIES -ne 60
+ sleep 3
+ done
+ expect_false ceph osd pool set cache pg_num 4
+ ceph osd tier cache-mode cache none
+ ceph osd tier set-overlay slow cache
+ expect_false ceph osd tier set-overlay slow cache2
+ expect_false ceph osd tier remove slow cache
+ ceph osd tier remove-overlay slow
+ ceph osd tier set-overlay slow cache2
+ ceph osd tier remove-overlay slow
+ ceph osd tier remove slow cache
+ ceph osd tier add slow2 cache
+ expect_false ceph osd tier set-overlay slow cache
+ ceph osd tier set-overlay slow2 cache
+ ceph osd tier remove-overlay slow2
+ ceph osd tier remove slow2 cache
+ ceph osd tier remove slow cache2
+
+ # make sure a non-empty pool fails
+ rados -p cache2 put /etc/passwd /etc/passwd
+ while ! ceph df | grep cache2 | grep ' 1 ' ; do
+ echo waiting for pg stats to flush
+ sleep 2
+ done
+ expect_false ceph osd tier add slow cache2
+ ceph osd tier add slow cache2 --force-nonempty
+ ceph osd tier remove slow cache2
+
+ ceph osd pool ls | grep cache2
+ ceph osd pool ls -f json-pretty | grep cache2
+ ceph osd pool ls detail | grep cache2
+ ceph osd pool ls detail -f json-pretty | grep cache2
+
+ ceph osd pool delete slow slow --yes-i-really-really-mean-it
+ ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
+ ceph osd pool delete cache cache --yes-i-really-really-mean-it
+ ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
+}
+
+function test_tiering_2()
+{
+ # make sure we can't clobber snapshot state
+ ceph osd pool create snap_base 2
+ ceph osd pool application enable snap_base rados
+ ceph osd pool create snap_cache 2
+ ceph osd pool mksnap snap_cache snapname
+ expect_false ceph osd tier add snap_base snap_cache
+ ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
+ ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
+}
+
+function test_tiering_3()
+{
+ # make sure we can't create snapshot on tier
+ ceph osd pool create basex 2
+ ceph osd pool application enable basex rados
+ ceph osd pool create cachex 2
+ ceph osd tier add basex cachex
+  expect_false ceph osd pool mksnap cachex snapname
+ ceph osd tier remove basex cachex
+ ceph osd pool delete basex basex --yes-i-really-really-mean-it
+ ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
+}
+
+function test_tiering_4()
+{
+ # make sure we can't create an ec pool tier
+ ceph osd pool create eccache 2 2 erasure
+ expect_false ceph osd set-require-min-compat-client bobtail
+ ceph osd pool create repbase 2
+ ceph osd pool application enable repbase rados
+ expect_false ceph osd tier add repbase eccache
+ ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
+ ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
+}
+
+function test_tiering_5()
+{
+ # convenient add-cache command
+ ceph osd pool create slow 2
+ ceph osd pool application enable slow rados
+ ceph osd pool create cache3 2
+ ceph osd tier add-cache slow cache3 1024000
+ ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
+ ceph osd tier remove slow cache3 2> $TMPFILE || true
+ check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
+ ceph osd tier remove-overlay slow
+ ceph osd tier remove slow cache3
+ ceph osd pool ls | grep cache3
+ ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
+ ! ceph osd pool ls | grep cache3 || exit 1
+ ceph osd pool delete slow slow --yes-i-really-really-mean-it
+}
+
+function test_tiering_6()
+{
+  # check whether add-cache actually works
+ ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
+ ceph osd pool create cachepool 2
+ ceph osd tier add-cache datapool cachepool 1024000
+ ceph osd tier cache-mode cachepool writeback
+ rados -p datapool put object /etc/passwd
+ rados -p cachepool stat object
+ rados -p cachepool cache-flush object
+ rados -p datapool stat object
+ ceph osd tier remove-overlay datapool
+ ceph osd tier remove datapool cachepool
+ ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
+ ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
+}
+
+function test_tiering_7()
+{
+ # protection against pool removal when used as tiers
+ ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
+ ceph osd pool create cachepool 2
+ ceph osd tier add-cache datapool cachepool 1024000
+ ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
+ check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
+ ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
+ check_response "EBUSY: pool 'datapool' has tiers cachepool"
+ ceph osd tier remove-overlay datapool
+ ceph osd tier remove datapool cachepool
+ ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
+ ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
+}
+
+function test_tiering_8()
+{
+ ## check health check
+ ceph osd set notieragent
+ ceph osd pool create datapool 2
+ ceph osd pool application enable datapool rados
+ ceph osd pool create cache4 2
+ ceph osd tier add-cache datapool cache4 1024000
+ ceph osd tier cache-mode cache4 writeback
+  tmpfile=$(mktemp)
+ dd if=/dev/zero of=$tmpfile bs=4K count=1
+ ceph osd pool set cache4 target_max_objects 200
+ ceph osd pool set cache4 target_max_bytes 1000000
+ rados -p cache4 put foo1 $tmpfile
+ rados -p cache4 put foo2 $tmpfile
+ rm -f $tmpfile
+ flush_pg_stats
+ ceph df | grep datapool | grep ' 2 '
+ ceph osd tier remove-overlay datapool
+ ceph osd tier remove datapool cache4
+ ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
+ ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
+ ceph osd unset notieragent
+}
+
+function test_tiering_9()
+{
+  # make sure 'tier remove' behaves as we expect, i.e., removing a tier
+  # from a pool that is not its base pool only results in a
+  # 'pool foo is now (or already was) not a tier of bar' message
+  #
+ ceph osd pool create basepoolA 2
+ ceph osd pool application enable basepoolA rados
+ ceph osd pool create basepoolB 2
+ ceph osd pool application enable basepoolB rados
+ poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
+ poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
+
+ ceph osd pool create cache5 2
+ ceph osd pool create cache6 2
+ ceph osd tier add basepoolA cache5
+ ceph osd tier add basepoolB cache6
+ ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
+ ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
+ ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
+ ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
+
+ ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
+ ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
+ ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
+ ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
+
+ ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
+ ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
+
+ ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
+ ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
+ ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
+ ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
+}
+
+function test_auth()
+{
+ expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
+ expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
+ ceph auth add client.xx mon 'allow *' osd "allow *"
+ ceph auth export client.xx >client.xx.keyring
+ ceph auth add client.xx -i client.xx.keyring
+ rm -f client.xx.keyring
+ ceph auth list | grep client.xx
+ ceph auth ls | grep client.xx
+ ceph auth get client.xx | grep caps | grep mon
+ ceph auth get client.xx | grep caps | grep osd
+ ceph auth get-key client.xx
+ ceph auth print-key client.xx
+ ceph auth print_key client.xx
+ ceph auth caps client.xx osd "allow rw"
+ expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
+ ceph auth get client.xx | grep osd | grep "allow rw"
+ ceph auth caps client.xx mon 'allow command "osd tree"'
+ ceph auth export | grep client.xx
+ ceph auth export -o authfile
+ ceph auth import -i authfile
+
+ ceph auth export -o authfile2
+ diff authfile authfile2
+ rm authfile authfile2
+ ceph auth del client.xx
+ expect_false ceph auth get client.xx
+
+ # (almost) interactive mode
+ echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
+ ceph auth get client.xx
+ # script mode
+ echo 'auth del client.xx' | ceph
+ expect_false ceph auth get client.xx
+}
+
+function test_auth_profiles()
+{
+ ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
+ mgr 'allow profile read-only'
+ ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
+ mgr 'allow profile read-write'
+ ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
+
+ ceph auth export > client.xx.keyring
+
+ # read-only is allowed all read-only commands (auth excluded)
+ ceph -n client.xx-profile-ro -k client.xx.keyring status
+ ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
+ ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
+ ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
+ # read-only gets access denied for rw commands or auth commands
+ ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
+ check_response "EACCES: access denied"
+
+ # read-write is allowed for all read-write commands (except auth)
+ ceph -n client.xx-profile-rw -k client.xx.keyring status
+ ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
+ ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
+ ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
+ ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
+ ceph -n client.xx-profile-rw -k client.xx.keyring log foo
+ ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
+ ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
+ # read-write gets access denied for auth commands
+ ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
+ check_response "EACCES: access denied"
+
+ # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth export
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
+ ceph -n client.xx-profile-rd -k client.xx.keyring status
+ ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ # read-only 'mon' subsystem commands are allowed
+ ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
+ # but read-write 'mon' commands are not
+ ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
+ check_response "EACCES: access denied"
+ ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
+ check_response "EACCES: access denied"
+
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
+ ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
+
+ # add a new role-definer with the existing role-definer
+ ceph -n client.xx-profile-rd -k client.xx.keyring \
+ auth add client.xx-profile-rd2 mon 'allow profile role-definer'
+ ceph -n client.xx-profile-rd -k client.xx.keyring \
+ auth export > client.xx.keyring.2
+ # remove old role-definer using the new role-definer
+ ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
+ auth del client.xx-profile-rd
+ # remove the remaining role-definer with admin
+ ceph auth del client.xx-profile-rd2
+ rm -f client.xx.keyring client.xx.keyring.2
+}
+
+function test_mon_caps()
+{
+ ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
+ chmod +r $TEMP_DIR/ceph.client.bug.keyring
+ ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
+ ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
+
+ # pass --no-mon-config since we are looking for the permission denied error
+ rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
+ cat $TMPFILE
+ check_response "Permission denied"
+
+ rm -rf $TEMP_DIR/ceph.client.bug.keyring
+ ceph auth del client.bug
+ ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
+ chmod +r $TEMP_DIR/ceph.client.bug.keyring
+ ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
+ ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
+ ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
+ rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
+ check_response "Permission denied"
+}
+
+function test_mon_misc()
+{
+ # with and without verbosity
+ ceph osd dump | grep '^epoch'
+ ceph --concise osd dump | grep '^epoch'
+
+ ceph osd df | grep 'MIN/MAX VAR'
+
+ # df
+ ceph df > $TMPFILE
+ grep RAW $TMPFILE
+ grep -v DIRTY $TMPFILE
+ ceph df detail > $TMPFILE
+ grep DIRTY $TMPFILE
+ ceph df --format json > $TMPFILE
+ grep 'total_bytes' $TMPFILE
+ grep -v 'dirty' $TMPFILE
+ ceph df detail --format json > $TMPFILE
+ grep 'rd_bytes' $TMPFILE
+ grep 'dirty' $TMPFILE
+ ceph df --format xml | grep '<total_bytes>'
+ ceph df detail --format xml | grep '<rd_bytes>'
+
+ ceph fsid
+ ceph health
+ ceph health detail
+ ceph health --format json-pretty
+ ceph health detail --format xml-pretty
+
+ ceph time-sync-status
+
+ ceph node ls
+ for t in mon osd mds mgr ; do
+ ceph node ls $t
+ done
+
+ ceph_watch_start
+ mymsg="this is a test log message $$.$(date)"
+ ceph log "$mymsg"
+ ceph log last | grep "$mymsg"
+ ceph log last 100 | grep "$mymsg"
+ ceph_watch_wait "$mymsg"
+
+ ceph mgr stat
+ ceph mgr dump
+ ceph mgr dump | jq -e '.active_clients[0].name'
+ ceph mgr module ls
+ ceph mgr module enable restful
+ expect_false ceph mgr module enable foodne
+ ceph mgr module enable foodne --force
+ ceph mgr module disable foodne
+ ceph mgr module disable foodnebizbangbash
+
+ ceph mon metadata a
+ ceph mon metadata
+ ceph mon count-metadata ceph_version
+ ceph mon versions
+
+ ceph mgr metadata
+ ceph mgr versions
+ ceph mgr count-metadata ceph_version
+
+ ceph versions
+
+ ceph node ls
+}
+
+function check_mds_active()
+{
+ fs_name=$1
+ ceph fs get $fs_name | grep active
+}
+
+function wait_mds_active()
+{
+ fs_name=$1
+ max_run=300
+ for i in $(seq 1 $max_run) ; do
+ if ! check_mds_active $fs_name ; then
+ echo "waiting for an active MDS daemon ($i/$max_run)"
+ sleep 5
+ else
+ break
+ fi
+ done
+ check_mds_active $fs_name
+}
+
+function get_mds_gids()
+{
+ fs_name=$1
+ ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
+}
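+
+# For reference, get_mds_gids prints a space-separated list of MDS GIDs,
+# e.g. "4107 4115" (illustrative values) for a filesystem with two daemons.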
+
+function fail_all_mds()
+{
+ fs_name=$1
+ ceph fs set $fs_name cluster_down true
+ mds_gids=$(get_mds_gids $fs_name)
+ for mds_gid in $mds_gids ; do
+ ceph mds fail $mds_gid
+ done
+  if check_mds_active $fs_name ; then
+      echo "An active MDS remains, something went wrong"
+      ceph fs get $fs_name
+      exit 1
+  fi
+}
+
+function remove_all_fs()
+{
+ existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
+ for fs_name in $existing_fs ; do
+ echo "Removing fs ${fs_name}..."
+ fail_all_mds $fs_name
+ echo "Removing existing filesystem '${fs_name}'..."
+ ceph fs rm $fs_name --yes-i-really-mean-it
+ echo "Removed '${fs_name}'."
+ done
+}
+
+# So that tests requiring an MDS can skip when none is configured
+# in the cluster at all
+function mds_exists()
+{
+ ceph auth ls | grep "^mds"
+}
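+
+# Typical use (as in test_mds_tell below):
+#   if ! mds_exists ; then echo "Skipping test, no MDS found" ; return ; fi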
+
+# some of the commands are just not idempotent.
+function without_test_dup_command()
+{
+  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
+    "$@"
+  else
+    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
+    unset CEPH_CLI_TEST_DUP_COMMAND
+    "$@"
+    CEPH_CLI_TEST_DUP_COMMAND=$saved
+  fi
+}
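+
+# Usage sketch: wrap a non-idempotent command, e.g.
+#   without_test_dup_command ceph tell mds.0 respawn
+# as done in test_mds_tell below.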
+
+function test_mds_tell()
+{
+ local FS_NAME=cephfs
+ if ! mds_exists ; then
+ echo "Skipping test, no MDS found"
+ return
+ fi
+
+ remove_all_fs
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
+ ceph fs new $FS_NAME fs_metadata fs_data
+ wait_mds_active $FS_NAME
+
+ # Test injectargs by GID
+ old_mds_gids=$(get_mds_gids $FS_NAME)
+ echo Old GIDs: $old_mds_gids
+
+ for mds_gid in $old_mds_gids ; do
+ ceph tell mds.$mds_gid injectargs "--debug-mds 20"
+ done
+ expect_false ceph tell mds.a injectargs mds_max_file_recover -1
+
+ # Test respawn by rank
+ without_test_dup_command ceph tell mds.0 respawn
+ new_mds_gids=$old_mds_gids
+  while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
+ sleep 5
+ new_mds_gids=$(get_mds_gids $FS_NAME)
+ done
+ echo New GIDs: $new_mds_gids
+
+ # Test respawn by ID
+ without_test_dup_command ceph tell mds.a respawn
+ new_mds_gids=$old_mds_gids
+  while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
+ sleep 5
+ new_mds_gids=$(get_mds_gids $FS_NAME)
+ done
+ echo New GIDs: $new_mds_gids
+
+ remove_all_fs
+ ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+ ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
+}
+
+function test_mon_mds()
+{
+ local FS_NAME=cephfs
+ remove_all_fs
+
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
+ ceph fs new $FS_NAME fs_metadata fs_data
+
+ ceph fs set $FS_NAME cluster_down true
+ ceph fs set $FS_NAME cluster_down false
+
+ ceph mds compat rm_incompat 4
+ ceph mds compat rm_incompat 4
+
+ # We don't want any MDSs to be up, their activity can interfere with
+ # the "current_epoch + 1" checking below if they're generating updates
+ fail_all_mds $FS_NAME
+
+ ceph mds compat show
+ ceph fs dump
+ ceph fs get $FS_NAME
+ for mds_gid in $(get_mds_gids $FS_NAME) ; do
+      ceph mds metadata $mds_gid
+ done
+ ceph mds metadata
+ ceph mds versions
+ ceph mds count-metadata os
+
+ # XXX mds fail, but how do you undo it?
+ mdsmapfile=$TEMP_DIR/mdsmap.$$
+ current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
+ [ -s $mdsmapfile ]
+ rm $mdsmapfile
+
+ ceph osd pool create data2 16
+ ceph osd pool create data3 16
+ data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
+ data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
+ ceph fs add_data_pool cephfs $data2_pool
+ ceph fs add_data_pool cephfs $data3_pool
+ ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
+ check_response "Error ENOENT"
+ ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
+ check_response "Error ENOENT"
+ ceph fs rm_data_pool cephfs $data2_pool
+ ceph fs rm_data_pool cephfs $data3_pool
+ ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
+ ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
+ ceph fs set cephfs max_mds 4
+ ceph fs set cephfs max_mds 3
+ ceph fs set cephfs max_mds 256
+ expect_false ceph fs set cephfs max_mds 257
+ ceph fs set cephfs max_mds 4
+ ceph fs set cephfs max_mds 256
+ expect_false ceph fs set cephfs max_mds 257
+ expect_false ceph fs set cephfs max_mds asdf
+ expect_false ceph fs set cephfs inline_data true
+ ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
+ expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
+ ceph fs set cephfs inline_data false
+ ceph fs set cephfs inline_data no
+ ceph fs set cephfs inline_data 0
+ expect_false ceph fs set cephfs inline_data asdf
+ ceph fs set cephfs max_file_size 1048576
+ expect_false ceph fs set cephfs max_file_size 123asdf
+
+ expect_false ceph fs set cephfs allow_new_snaps
+ ceph fs set cephfs allow_new_snaps true
+ ceph fs set cephfs allow_new_snaps 0
+ ceph fs set cephfs allow_new_snaps false
+ ceph fs set cephfs allow_new_snaps no
+ expect_false ceph fs set cephfs allow_new_snaps taco
+
+ # we should never be able to add EC pools as data or metadata pools
+ # create an ec-pool...
+ ceph osd pool create mds-ec-pool 16 16 erasure
+ set +e
+ ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
+ check_response 'erasure-code' $? 22
+ set -e
+ ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
+ data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
+ metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
+
+ fail_all_mds $FS_NAME
+
+ set +e
+ # Check that rmfailed requires confirmation
+ expect_false ceph mds rmfailed 0
+ ceph mds rmfailed 0 --yes-i-really-mean-it
+ set -e
+
+ # Check that `fs new` is no longer permitted
+ expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
+
+ # Check that 'fs reset' runs
+ ceph fs reset $FS_NAME --yes-i-really-mean-it
+
+ # Check that creating a second FS fails by default
+ ceph osd pool create fs_metadata2 16
+ ceph osd pool create fs_data2 16
+ set +e
+ expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
+ set -e
+
+ # Check that setting enable_multiple enables creation of second fs
+ ceph fs flag set enable_multiple true --yes-i-really-mean-it
+ ceph fs new cephfs2 fs_metadata2 fs_data2
+
+ # Clean up multi-fs stuff
+ fail_all_mds cephfs2
+ ceph fs rm cephfs2 --yes-i-really-mean-it
+ ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
+ ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
+
+ fail_all_mds $FS_NAME
+
+ # Clean up to enable subsequent fs new tests
+ ceph fs rm $FS_NAME --yes-i-really-mean-it
+
+ set +e
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
+ check_response 'erasure-code' $? 22
+ ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
+ check_response 'already used by filesystem' $? 22
+ ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
+ check_response 'erasure-code' $? 22
+ ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
+ check_response 'erasure-code' $? 22
+ set -e
+
+  # ... now create a cache tier in front of the EC pool...
+ ceph osd pool create mds-tier 2
+ ceph osd tier add mds-ec-pool mds-tier
+ ceph osd tier set-overlay mds-ec-pool mds-tier
+ tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
+
+ # Use of a readonly tier should be forbidden
+ ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
+ set +e
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
+ check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
+ set -e
+
+ # Use of a writeback tier should enable FS creation
+ ceph osd tier cache-mode mds-tier writeback
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
+
+ # While a FS exists using the tiered pools, I should not be allowed
+ # to remove the tier
+ set +e
+ ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ set -e
+
+ fail_all_mds $FS_NAME
+ ceph fs rm $FS_NAME --yes-i-really-mean-it
+
+ # ... but we should be forbidden from using the cache pool in the FS directly.
+ set +e
+ ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
+ check_response 'in use as a cache tier' $? 22
+ ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
+ check_response 'already used by filesystem' $? 22
+ ceph fs new $FS_NAME mds-tier fs_data --force 2>$TMPFILE
+ check_response 'in use as a cache tier' $? 22
+ ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
+ check_response 'already used by filesystem' $? 22
+ ceph fs new $FS_NAME mds-tier mds-tier --force 2>$TMPFILE
+ check_response 'in use as a cache tier' $? 22
+ set -e
+
+ # Clean up tier + EC pools
+ ceph osd tier remove-overlay mds-ec-pool
+ ceph osd tier remove mds-ec-pool mds-tier
+
+ # Create a FS using the 'cache' pool now that it's no longer a tier
+ ceph fs new $FS_NAME fs_metadata mds-tier --force
+
+ # We should be forbidden from using this pool as a tier now that
+ # it's in use for CephFS
+ set +e
+ ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
+ check_response 'in use by CephFS' $? 16
+ set -e
+
+ fail_all_mds $FS_NAME
+ ceph fs rm $FS_NAME --yes-i-really-mean-it
+
+ # We should be permitted to use an EC pool with overwrites enabled
+ # as the data pool...
+ ceph osd pool set mds-ec-pool allow_ec_overwrites true
+ ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
+ fail_all_mds $FS_NAME
+ ceph fs rm $FS_NAME --yes-i-really-mean-it
+
+ # ...but not as the metadata pool
+ set +e
+ ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
+ check_response 'already used by filesystem' $? 22
+ ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
+ check_response 'erasure-code' $? 22
+ set -e
+
+ ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
+
+ # Create a FS and check that we can subsequently add a cache tier to it
+ ceph fs new $FS_NAME fs_metadata fs_data --force
+
+ # Adding overlay to FS pool should be permitted, RADOS clients handle this.
+ ceph osd tier add fs_metadata mds-tier
+ ceph osd tier cache-mode mds-tier writeback
+ ceph osd tier set-overlay fs_metadata mds-tier
+
+ # Removing tier should be permitted because the underlying pool is
+ # replicated (#11504 case)
+ ceph osd tier cache-mode mds-tier proxy
+ ceph osd tier remove-overlay fs_metadata
+ ceph osd tier remove fs_metadata mds-tier
+ ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
+
+ # Clean up FS
+ fail_all_mds $FS_NAME
+ ceph fs rm $FS_NAME --yes-i-really-mean-it
+
+ ceph mds stat
+ # ceph mds tell mds.a getmap
+ # ceph mds rm
+ # ceph mds rmfailed
+ # ceph mds set_state
+
+ ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+ ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
+}
+
+function test_mon_mds_metadata()
+{
+ local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
+ test "$nmons" -gt 0
+
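+  # the sed below extracts "<gid> <name> <rank>" from dump lines shaped
+  # roughly like: 4107: v2:127.0.0.1:6810/... 'a' mds.0.14 up:active
+  # (illustrative values)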
+ ceph fs dump |
+ sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
+ while read gid id rank; do
+ ceph mds metadata ${gid} | grep '"hostname":'
+ ceph mds metadata ${id} | grep '"hostname":'
+ ceph mds metadata ${rank} | grep '"hostname":'
+
+ local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
+ test "$n" -eq "$nmons"
+ done
+
+ expect_false ceph mds metadata UNKNOWN
+}
+
+function test_mon_mon()
+{
+ # print help message
+ ceph --help mon
+ # -h works even when some arguments are passed
+ ceph osd dump -h | grep 'osd dump'
+ ceph osd dump 123 -h | grep 'osd dump'
+ # no mon add/remove
+ ceph mon dump
+ ceph mon getmap -o $TEMP_DIR/monmap.$$
+ [ -s $TEMP_DIR/monmap.$$ ]
+
+ # ceph mon tell
+ first=$(ceph mon dump -f json | jq -r '.mons[0].name')
+ ceph tell mon.$first mon_status
+
+ # test mon features
+ ceph mon feature ls
+ ceph mon feature set kraken --yes-i-really-mean-it
+ expect_false ceph mon feature set abcd
+ expect_false ceph mon feature set abcd --yes-i-really-mean-it
+
+ # test elector
+ expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
+ ceph mon set election_strategy disallow
+ ceph mon add disallowed_leader $first
+ ceph mon set election_strategy connectivity
+ ceph mon rm disallowed_leader $first
+ ceph mon set election_strategy classic
+ expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
+
+ # test mon stat
+ # don't check output, just ensure it does not fail.
+ ceph mon stat
+ ceph mon stat -f json | jq '.'
+}
+
+function test_mon_priority_and_weight()
+{
+ for i in 0 1 65535; do
+ ceph mon set-weight a $i
+ w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
+ [[ "$w" == "$i" ]]
+ done
+
+ for i in -1 65536; do
+ expect_false ceph mon set-weight a $i
+ done
+}
+
+function gen_secrets_file()
+{
+    # let's assume we can have the following types
+ # all - generates both cephx and lockbox, with mock dm-crypt key
+ # cephx - only cephx
+ # no_cephx - lockbox and dm-crypt, no cephx
+ # no_lockbox - dm-crypt and cephx, no lockbox
+ # empty - empty file
+ # empty_json - correct json, empty map
+ # bad_json - bad json :)
+ #
+ local t=$1
+ if [[ -z "$t" ]]; then
+ t="all"
+ fi
+
+ fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
+ echo $fn
+ if [[ "$t" == "empty" ]]; then
+ return 0
+ fi
+
+ echo "{" > $fn
+ if [[ "$t" == "bad_json" ]]; then
+ echo "asd: ; }" >> $fn
+ return 0
+ elif [[ "$t" == "empty_json" ]]; then
+ echo "}" >> $fn
+ return 0
+ fi
+
+ cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
+ lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
+ dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
+
+ if [[ "$t" == "all" ]]; then
+ echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
+ elif [[ "$t" == "cephx" ]]; then
+ echo "$cephx_secret" >> $fn
+ elif [[ "$t" == "no_cephx" ]]; then
+ echo "$lb_secret,$dmcrypt_key" >> $fn
+ elif [[ "$t" == "no_lockbox" ]]; then
+ echo "$cephx_secret,$dmcrypt_key" >> $fn
+ else
+ echo "unknown gen_secrets_file() type \'$fn\'"
+ return 1
+ fi
+ echo "}" >> $fn
+ return 0
+}
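+
+# Usage sketch (hypothetical values): generate a cephx-only secrets file
+# and feed it to 'osd new', e.g.
+#   fn=$(gen_secrets_file "cephx")
+#   ceph osd new $(uuidgen) -i $fn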
+
+function test_mon_osd_create_destroy()
+{
+ ceph osd new 2>&1 | grep 'EINVAL'
+ ceph osd new '' -1 2>&1 | grep 'EINVAL'
+ ceph osd new '' 10 2>&1 | grep 'EINVAL'
+
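+  # getmaxosd prints e.g. "max_osd = 10 in epoch 42" (illustrative
+  # numbers); the sed expressions strip that down to just the number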
+ old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
+
+ old_osds=$(ceph osd ls)
+ num_osds=$(ceph osd ls | wc -l)
+
+ uuid=$(uuidgen)
+ id=$(ceph osd new $uuid 2>/dev/null)
+
+ for i in $old_osds; do
+ [[ "$i" != "$id" ]]
+ done
+
+ ceph osd find $id
+
+ id2=`ceph osd new $uuid 2>/dev/null`
+
+ [[ $id2 == $id ]]
+
+ ceph osd new $uuid $id
+
+ id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
+ ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
+
+ uuid2=$(uuidgen)
+ id2=$(ceph osd new $uuid2)
+ ceph osd find $id2
+ [[ "$id2" != "$id" ]]
+
+ ceph osd new $uuid $id2 2>&1 | grep EEXIST
+ ceph osd new $uuid2 $id2
+
+ # test with secrets
+ empty_secrets=$(gen_secrets_file "empty")
+ empty_json=$(gen_secrets_file "empty_json")
+ all_secrets=$(gen_secrets_file "all")
+ cephx_only=$(gen_secrets_file "cephx")
+ no_cephx=$(gen_secrets_file "no_cephx")
+ no_lockbox=$(gen_secrets_file "no_lockbox")
+ bad_json=$(gen_secrets_file "bad_json")
+
+ # empty secrets should be idempotent
+ new_id=$(ceph osd new $uuid $id -i $empty_secrets)
+ [[ "$new_id" == "$id" ]]
+
+ # empty json, thus empty secrets
+ new_id=$(ceph osd new $uuid $id -i $empty_json)
+ [[ "$new_id" == "$id" ]]
+
+ ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
+
+ ceph osd rm $id
+ ceph osd rm $id2
+ ceph osd setmaxosd $old_maxosd
+
+ ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
+ ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
+
+ osds=$(ceph osd ls)
+ id=$(ceph osd new $uuid -i $all_secrets)
+ for i in $osds; do
+ [[ "$i" != "$id" ]]
+ done
+
+ ceph osd find $id
+
+ # validate secrets and dm-crypt are set
+ k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
+ s=$(cat $all_secrets | jq '.cephx_secret')
+ [[ $k == $s ]]
+ k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
+ jq '.key')
+ s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
+ [[ $k == $s ]]
+ ceph config-key exists dm-crypt/osd/$uuid/luks
+
+ osds=$(ceph osd ls)
+ id2=$(ceph osd new $uuid2 -i $cephx_only)
+ for i in $osds; do
+ [[ "$i" != "$id2" ]]
+ done
+
+ ceph osd find $id2
+  k=$(ceph auth get-key osd.$id2 --format=json-pretty 2>/dev/null | jq '.key')
+  s=$(cat $cephx_only | jq '.cephx_secret')
+  [[ $k == $s ]]
+ expect_false ceph auth get-key client.osd-lockbox.$uuid2
+ expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
+
+ ceph osd destroy osd.$id2 --yes-i-really-mean-it
+ ceph osd destroy $id2 --yes-i-really-mean-it
+ ceph osd find $id2
+ expect_false ceph auth get-key osd.$id2
+ ceph osd dump | grep osd.$id2 | grep destroyed
+
+ id3=$id2
+ uuid3=$(uuidgen)
+ ceph osd new $uuid3 $id3 -i $all_secrets
+ ceph osd dump | grep osd.$id3 | expect_false grep destroyed
+ ceph auth get-key client.osd-lockbox.$uuid3
+ ceph auth get-key osd.$id3
+ ceph config-key exists dm-crypt/osd/$uuid3/luks
+
+ ceph osd purge-new osd.$id3 --yes-i-really-mean-it
+ expect_false ceph osd find $id2
+ expect_false ceph auth get-key osd.$id2
+ expect_false ceph auth get-key client.osd-lockbox.$uuid3
+ expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
+ ceph osd purge osd.$id3 --yes-i-really-mean-it
+ ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
+
+ ceph osd purge osd.$id --yes-i-really-mean-it
+ ceph osd purge 123456 --yes-i-really-mean-it
+ expect_false ceph osd find $id
+ expect_false ceph auth get-key osd.$id
+ expect_false ceph auth get-key client.osd-lockbox.$uuid
+ expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
+
+ rm $empty_secrets $empty_json $all_secrets $cephx_only \
+ $no_cephx $no_lockbox $bad_json
+
+ for i in $(ceph osd ls); do
+ [[ "$i" != "$id" ]]
+ [[ "$i" != "$id2" ]]
+ [[ "$i" != "$id3" ]]
+ done
+
+ [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
+ ceph osd setmaxosd $old_maxosd
+}
+
+function test_mon_config_key()
+{
+ key=asdfasdfqwerqwreasdfuniquesa123df
+ ceph config-key list | grep -c $key | grep 0
+ ceph config-key get $key | grep -c bar | grep 0
+ ceph config-key set $key bar
+ ceph config-key get $key | grep bar
+ ceph config-key list | grep -c $key | grep 1
+ ceph config-key dump | grep $key | grep bar
+ ceph config-key rm $key
+ expect_false ceph config-key get $key
+ ceph config-key list | grep -c $key | grep 0
+ ceph config-key dump | grep -c $key | grep 0
+}
+
+function test_mon_osd()
+{
+ #
+ # osd blocklist
+ #
+ bl=192.168.0.1:0/1000
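+  # blocklist entries take the form IP:port/nonce; port 0 and nonce 1000
+  # here are arbitrary example values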
+ ceph osd blocklist add $bl
+ ceph osd blocklist ls | grep $bl
+ ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
+ ceph osd dump --format=json-pretty | grep $bl
+ ceph osd dump | grep $bl
+ ceph osd blocklist rm $bl
+ ceph osd blocklist ls | expect_false grep $bl
+
+ bl=192.168.0.1
+ # test without nonce, invalid nonce
+ ceph osd blocklist add $bl
+ ceph osd blocklist ls | grep $bl
+ ceph osd blocklist rm $bl
+ ceph osd blocklist ls | expect_false grep $bl
+ expect_false "ceph osd blocklist add $bl/-1"
+ expect_false "ceph osd blocklist add $bl/foo"
+
+ # test with invalid address
+ expect_false "ceph osd blocklist add 1234.56.78.90/100"
+
+ # test range blocklisting
+ bl=192.168.0.1:0/24
+ ceph osd blocklist range add $bl
+ ceph osd blocklist ls | grep $bl
+ ceph osd blocklist range rm $bl
+ ceph osd blocklist ls | expect_false grep $bl
+ bad_bl=192.168.0.1/33
+ expect_false ceph osd blocklist range add $bad_bl
+
+ # Test `clear`
+ ceph osd blocklist add $bl
+ ceph osd blocklist ls | grep $bl
+ ceph osd blocklist clear
+ ceph osd blocklist ls | expect_false grep $bl
+
+ # deprecated syntax?
+ ceph osd blacklist ls
+
+ #
+ # osd crush
+ #
+ ceph osd crush reweight-all
+ ceph osd crush tunables legacy
+ ceph osd crush show-tunables | grep argonaut
+ ceph osd crush tunables bobtail
+ ceph osd crush show-tunables | grep bobtail
+ ceph osd crush tunables firefly
+ ceph osd crush show-tunables | grep firefly
+
+ ceph osd crush set-tunable straw_calc_version 0
+ ceph osd crush get-tunable straw_calc_version | grep 0
+ ceph osd crush set-tunable straw_calc_version 1
+ ceph osd crush get-tunable straw_calc_version | grep 1
+
+ #
+ # require-min-compat-client
+ expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
+ ceph osd get-require-min-compat-client | grep luminous
+ ceph osd dump | grep 'require_min_compat_client luminous'
+
+ #
+ # osd scrub
+ #
+
+ # blocking
+ ceph osd scrub 0 --block
+ ceph osd deep-scrub 0 --block
+
+ # how do I tell when these are done?
+ ceph osd scrub 0
+ ceph osd deep-scrub 0
+ ceph osd repair 0
+
+ # pool scrub, force-recovery/backfill
+ pool_names=`rados lspools`
+ for pool_name in $pool_names
+ do
+ ceph osd pool scrub $pool_name
+ ceph osd pool deep-scrub $pool_name
+ ceph osd pool repair $pool_name
+ ceph osd pool force-recovery $pool_name
+ ceph osd pool cancel-force-recovery $pool_name
+ ceph osd pool force-backfill $pool_name
+ ceph osd pool cancel-force-backfill $pool_name
+ done
+
+ for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
+ norebalance norecover notieragent noautoscale
+ do
+ ceph osd set $f
+ ceph osd unset $f
+ done
+ expect_false ceph osd set bogus
+ expect_false ceph osd unset bogus
+ for f in sortbitwise recover_deletes require_jewel_osds \
+ require_kraken_osds
+ do
+ expect_false ceph osd set $f
+ expect_false ceph osd unset $f
+ done
+ ceph osd require-osd-release reef
+ # can't lower
+ expect_false ceph osd require-osd-release quincy
+ expect_false ceph osd require-osd-release pacific
+ # these are no-ops but should succeed.
+
+ ceph osd set noup
+ ceph osd down 0
+ ceph osd dump | grep 'osd.0 down'
+ ceph osd unset noup
+ max_run=1000
+ for ((i=0; i < $max_run; i++)); do
+ if ! ceph osd dump | grep 'osd.0 up'; then
+ echo "waiting for osd.0 to come back up ($i/$max_run)"
+ sleep 1
+ else
+ break
+ fi
+ done
+ ceph osd dump | grep 'osd.0 up'
+
+ ceph osd dump | grep 'osd.0 up'
+ # ceph osd find expects the OsdName, so both ints and osd.n should work.
+ ceph osd find 1
+ ceph osd find osd.1
+ expect_false ceph osd find osd.xyz
+ expect_false ceph osd find xyz
+ expect_false ceph osd find 0.1
+ ceph --format plain osd find 1 # falls back to json-pretty
+ if [ `uname` == Linux ]; then
+ ceph osd metadata 1 | grep 'distro'
+ ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
+ fi
+ ceph osd out 0
+ ceph osd dump | grep 'osd.0.*out'
+ ceph osd in 0
+ ceph osd dump | grep 'osd.0.*in'
+ ceph osd find 0
+
+ ceph osd info 0
+ ceph osd info osd.0
+ expect_false ceph osd info osd.xyz
+ expect_false ceph osd info xyz
+ expect_false ceph osd info 42
+ expect_false ceph osd info osd.42
+
+ ceph osd info
+ info_json=$(ceph osd info --format=json | jq -cM '.')
+ dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
+ if [[ "${info_json}" != "${dump_json}" ]]; then
+ echo "waiting for OSDs to settle"
+ sleep 10
+ info_json=$(ceph osd info --format=json | jq -cM '.')
+ dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
+ [[ "${info_json}" == "${dump_json}" ]]
+ fi
+
+ info_json=$(ceph osd info 0 --format=json | jq -cM '.')
+ dump_json=$(ceph osd dump --format=json | \
+ jq -cM '.osds[] | select(.osd == 0)')
+ [[ "${info_json}" == "${dump_json}" ]]
+
+ info_plain="$(ceph osd info)"
+ dump_plain="$(ceph osd dump | grep '^osd')"
+ [[ "${info_plain}" == "${dump_plain}" ]]
+
+ info_plain="$(ceph osd info 0)"
+ dump_plain="$(ceph osd dump | grep '^osd.0')"
+ [[ "${info_plain}" == "${dump_plain}" ]]
+
+ ceph osd add-nodown 0 1
+ ceph health detail | grep 'NODOWN'
+ ceph osd rm-nodown 0 1
+ ! ceph health detail | grep 'NODOWN'
+
+ ceph osd out 0 # so we can mark it as noin later
+ ceph osd add-noin 0
+ ceph health detail | grep 'NOIN'
+ ceph osd rm-noin 0
+ ! ceph health detail | grep 'NOIN'
+ ceph osd in 0
+
+ ceph osd add-noout 0
+ ceph health detail | grep 'NOOUT'
+ ceph osd rm-noout 0
+ ! ceph health detail | grep 'NOOUT'
+
+ # test osd id parse
+ expect_false ceph osd add-noup 797er
+ expect_false ceph osd add-nodown u9uwer
+ expect_false ceph osd add-noin 78~15
+
+ expect_false ceph osd rm-noup 1234567
+ expect_false ceph osd rm-nodown fsadf7
+ expect_false ceph osd rm-noout 790-fd
+
+ ids=`ceph osd ls-tree default`
+ for osd in $ids
+ do
+ ceph osd add-nodown $osd
+ ceph osd add-noout $osd
+ done
+ ceph -s | grep 'NODOWN'
+ ceph -s | grep 'NOOUT'
+ ceph osd rm-nodown any
+ ceph osd rm-noout all
+ ! ceph -s | grep 'NODOWN'
+ ! ceph -s | grep 'NOOUT'
+
+ # test crush node flags
+ ceph osd add-noup osd.0
+ ceph osd add-nodown osd.0
+ ceph osd add-noin osd.0
+ ceph osd add-noout osd.0
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
+ ceph osd rm-noup osd.0
+ ceph osd rm-nodown osd.0
+ ceph osd rm-noin osd.0
+ ceph osd rm-noout osd.0
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
+
+ ceph osd crush add-bucket foo host root=default
+ ceph osd add-noup foo
+ ceph osd add-nodown foo
+ ceph osd add-noin foo
+ ceph osd add-noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
+ ceph osd rm-noup foo
+ ceph osd rm-nodown foo
+ ceph osd rm-noin foo
+ ceph osd rm-noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
+ ceph osd add-noup foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
+ ceph osd crush rm foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
+
+ ceph osd set-group noup osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd set-group noup,nodown osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+ ceph osd set-group noup,nodown,noin osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+ ceph osd set-group noup,nodown,noin,noout osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+ ceph osd unset-group noup osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+ ceph osd unset-group noup,nodown osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin,noout osd.0
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+ ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
+ ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+ ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+ ceph osd set-group noup all
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
+ ceph osd unset-group noup all
+ ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
+
+ # crush node flags
+ ceph osd crush add-bucket foo host root=default
+ ceph osd set-group noup foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+ ceph osd set-group noup,nodown foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+ ceph osd set-group noup,nodown,noin foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd set-group noup,nodown,noin,noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+
+ ceph osd unset-group noup foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+ ceph osd unset-group noup,nodown foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin,noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+ ceph osd set-group noin,noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+ ceph osd unset-group noin,noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
+
+ ceph osd set-group noup,nodown,noin,noout foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
+ ceph osd crush rm foo
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
+
+ # test device class flags
+ osd_0_device_class=$(ceph osd crush get-device-class osd.0)
+ ceph osd set-group noup $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+ ceph osd set-group noup,nodown $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+ ceph osd set-group noup,nodown,noin $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+ ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+
+ ceph osd unset-group noup $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+ ceph osd unset-group noup,nodown $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+ ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
+
+ ceph osd set-group noin,noout $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
+ ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
+ ceph osd unset-group noin,noout $osd_0_device_class
+ ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
+
+ # make sure mark out preserves weight
+ ceph osd reweight osd.0 .5
+ ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
+ ceph osd out 0
+ ceph osd in 0
+ ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
+
+  f=$TEMP_DIR/map.$$
+  ceph osd getmap -o $f
+ [ -s $f ]
+ rm $f
+ save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
+ [ "$save" -gt 0 ]
+ ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
+ ceph osd setmaxosd 10
+ ceph osd getmaxosd | grep 'max_osd = 10'
+ ceph osd setmaxosd $save
+ ceph osd getmaxosd | grep "max_osd = $save"
+
+ for id in `ceph osd ls` ; do
+ retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
+ done
+
+ ceph osd rm 0 2>&1 | grep 'EBUSY'
+
+ local old_osds=$(echo $(ceph osd ls))
+ id=`ceph osd create`
+ ceph osd find $id
+ ceph osd lost $id --yes-i-really-mean-it
+ expect_false ceph osd setmaxosd $id
+ local new_osds=$(echo $(ceph osd ls))
+ for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
+ ceph osd rm $id
+ done
+
+ uuid=`uuidgen`
+ id=`ceph osd create $uuid`
+ id2=`ceph osd create $uuid`
+ [ "$id" = "$id2" ]
+ ceph osd rm $id
+
+ ceph --help osd
+
+ # reset max_osd.
+ ceph osd setmaxosd $id
+ ceph osd getmaxosd | grep "max_osd = $save"
+ local max_osd=$save
+
+ ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
+ ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
+
+ id=`ceph osd create $uuid $max_osd`
+ [ "$id" = "$max_osd" ]
+ ceph osd find $id
+ max_osd=$((max_osd + 1))
+ ceph osd getmaxosd | grep "max_osd = $max_osd"
+
+ ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
+ ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
+ id2=`ceph osd create $uuid`
+ [ "$id" = "$id2" ]
+ id2=`ceph osd create $uuid $id`
+ [ "$id" = "$id2" ]
+
+ uuid=`uuidgen`
+ local gap_start=$max_osd
+ id=`ceph osd create $uuid $((gap_start + 100))`
+ [ "$id" = "$((gap_start + 100))" ]
+ max_osd=$((id + 1))
+ ceph osd getmaxosd | grep "max_osd = $max_osd"
+
+ ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
+
+ #
+ # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
+  # is repeated and consumes two osd ids, not just one.
+ #
+ local next_osd=$gap_start
+ id=`ceph osd create $(uuidgen)`
+ [ "$id" = "$next_osd" ]
+
+ next_osd=$((id + 1))
+ id=`ceph osd create $(uuidgen) $next_osd`
+ [ "$id" = "$next_osd" ]
+
+ local new_osds=$(echo $(ceph osd ls))
+ for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
+ [ $id -ge $save ]
+ ceph osd rm $id
+ done
+ ceph osd setmaxosd $save
+
+ ceph osd ls
+ ceph osd pool create data 16
+ ceph osd pool application enable data rados
+ ceph osd lspools | grep data
+ ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
+ ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
+ ceph osd pool delete data data --yes-i-really-really-mean-it
+
+ ceph osd pause
+ ceph osd dump | grep 'flags.*pauserd,pausewr'
+ ceph osd unpause
+
+ ceph osd tree
+ ceph osd tree up
+ ceph osd tree down
+ ceph osd tree in
+ ceph osd tree out
+ ceph osd tree destroyed
+ ceph osd tree up in
+ ceph osd tree up out
+ ceph osd tree down in
+ ceph osd tree down out
+ ceph osd tree out down
+ expect_false ceph osd tree up down
+ expect_false ceph osd tree up destroyed
+ expect_false ceph osd tree down destroyed
+ expect_false ceph osd tree up down destroyed
+ expect_false ceph osd tree in out
+ expect_false ceph osd tree up foo
+
+ ceph osd metadata
+ ceph osd count-metadata os
+ ceph osd versions
+
+ ceph osd perf
+ ceph osd blocked-by
+
+ ceph osd stat | grep up
+}
+
+function test_mon_crush()
+{
+ f=$TEMP_DIR/map.$$
+ epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
+ [ -s $f ]
+ [ "$epoch" -gt 1 ]
+ nextepoch=$(( $epoch + 1 ))
+ echo epoch $epoch nextepoch $nextepoch
+ rm -f $f.epoch
+ expect_false ceph osd setcrushmap $nextepoch -i $f
+ gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
+ echo gotepoch $gotepoch
+ [ "$gotepoch" -eq "$nextepoch" ]
+ # should be idempotent
+ gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
+  echo gotepoch $gotepoch
+ [ "$gotepoch" -eq "$nextepoch" ]
+ rm $f
+}
+
+function test_mon_osd_pool()
+{
+ #
+ # osd pool
+ #
+ ceph osd pool create data 16
+ ceph osd pool application enable data rados
+ ceph osd pool mksnap data datasnap
+ rados -p data lssnap | grep datasnap
+ ceph osd pool rmsnap data datasnap
+ expect_false ceph osd pool rmsnap pool_fake snapshot
+ ceph osd pool delete data data --yes-i-really-really-mean-it
+
+ ceph osd pool create data2 16
+ ceph osd pool application enable data2 rados
+ ceph osd pool rename data2 data3
+ ceph osd lspools | grep data3
+ ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
+
+ ceph osd pool create replicated 16 16 replicated
+ ceph osd pool create replicated 1 16 replicated
+ ceph osd pool create replicated 16 16 # default is replicated
+ ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
+ ceph osd pool application enable replicated rados
+ # should fail because the type is not the same
+ expect_false ceph osd pool create replicated 16 16 erasure
+ ceph osd lspools | grep replicated
+ ceph osd pool create ec_test 1 1 erasure
+ ceph osd pool application enable ec_test rados
+ set +e
+ ceph osd count-metadata osd_objectstore | grep 'bluestore'
+  if [ $? -eq 1 ]; then # enabling ec_overwrites on non-bluestore pools should fail
+ ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
+ check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
+ else
+ ceph osd pool set ec_test allow_ec_overwrites true || return 1
+ expect_false ceph osd pool set ec_test allow_ec_overwrites false
+ fi
+ set -e
+ ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
+ ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
+
+ # test create pool with rule
+ ceph osd erasure-code-profile set foo foo
+ ceph osd erasure-code-profile ls | grep foo
+ ceph osd crush rule create-erasure foo foo
+ ceph osd pool create erasure 16 16 erasure foo
+ expect_false ceph osd erasure-code-profile rm foo
+ ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
+ ceph osd crush rule rm foo
+ ceph osd erasure-code-profile rm foo
+
+ # autoscale mode
+ ceph osd pool create modeon --autoscale-mode=on
+ ceph osd dump | grep modeon | grep 'autoscale_mode on'
+ ceph osd pool create modewarn --autoscale-mode=warn
+ ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
+ ceph osd pool create modeoff --autoscale-mode=off
+ ceph osd dump | grep modeoff | grep 'autoscale_mode off'
+ ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
+ ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
+ ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
+}
+
+function test_mon_osd_pool_quota()
+{
+ #
+ # test osd pool set/get quota
+ #
+
+ # create tmp pool
+ ceph osd pool create tmp-quota-pool 32
+ ceph osd pool application enable tmp-quota-pool rados
+ #
+ # set erroneous quotas
+ #
+ expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
+ expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
+ expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
+ #
+ # set valid quotas
+ #
+ ceph osd pool set-quota tmp-quota-pool max_bytes 10
+ ceph osd pool set-quota tmp-quota-pool max_objects 10M
+ #
+ # get quotas in json-pretty format
+ #
+ ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
+ grep '"quota_max_objects":.*10000000'
+ ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
+ grep '"quota_max_bytes":.*10'
+ #
+ # get quotas
+ #
+ ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
+ ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
+ #
+ # set valid quotas with unit prefix
+ #
+ ceph osd pool set-quota tmp-quota-pool max_bytes 10K
+ #
+ # get quotas
+ #
+ ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
+ #
+ # set valid quotas with unit prefix
+ #
+ ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
+ #
+ # get quotas
+ #
+ ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
+  #
+ # reset pool quotas
+ #
+ ceph osd pool set-quota tmp-quota-pool max_bytes 0
+ ceph osd pool set-quota tmp-quota-pool max_objects 0
+ #
+ # test N/A quotas
+ #
+ ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
+ ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
+ #
+ # cleanup tmp pool
+ ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
+}
+
+function test_mon_pg()
+{
+ # Make sure we start healthy.
+ wait_for_health_ok
+
+ ceph pg debug unfound_objects_exist
+ ceph pg debug degraded_pgs_exist
+ ceph pg deep-scrub 1.0
+ ceph pg dump
+ ceph pg dump pgs_brief --format=json
+ ceph pg dump pgs --format=json
+ ceph pg dump pools --format=json
+ ceph pg dump osds --format=json
+ ceph pg dump sum --format=json
+ ceph pg dump all --format=json
+ ceph pg dump pgs_brief osds --format=json
+ ceph pg dump pools osds pgs_brief --format=json
+ ceph pg dump_json
+ ceph pg dump_pools_json
+ ceph pg dump_stuck inactive
+ ceph pg dump_stuck unclean
+ ceph pg dump_stuck stale
+ ceph pg dump_stuck undersized
+ ceph pg dump_stuck degraded
+ ceph pg ls
+ ceph pg ls 1
+ ceph pg ls stale
+ expect_false ceph pg ls scrubq
+ ceph pg ls active stale repair recovering
+ ceph pg ls 1 active
+ ceph pg ls 1 active stale
+ ceph pg ls-by-primary osd.0
+ ceph pg ls-by-primary osd.0 1
+ ceph pg ls-by-primary osd.0 active
+ ceph pg ls-by-primary osd.0 active stale
+ ceph pg ls-by-primary osd.0 1 active stale
+ ceph pg ls-by-osd osd.0
+ ceph pg ls-by-osd osd.0 1
+ ceph pg ls-by-osd osd.0 active
+ ceph pg ls-by-osd osd.0 active stale
+ ceph pg ls-by-osd osd.0 1 active stale
+ ceph pg ls-by-pool rbd
+ ceph pg ls-by-pool rbd active stale
+ # can't test this...
+ # ceph pg force_create_pg
+ ceph pg getmap -o $TEMP_DIR/map.$$
+ [ -s $TEMP_DIR/map.$$ ]
+ ceph pg map 1.0 | grep acting
+ ceph pg repair 1.0
+ ceph pg scrub 1.0
+
+ ceph osd set-full-ratio .962
+ ceph osd dump | grep '^full_ratio 0.962'
+ ceph osd set-backfillfull-ratio .912
+ ceph osd dump | grep '^backfillfull_ratio 0.912'
+ ceph osd set-nearfull-ratio .892
+ ceph osd dump | grep '^nearfull_ratio 0.892'
+
+ # Check health status
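+  # The ratios must satisfy nearfull < backfillfull < full; nudging
+  # nearfull (.913) above backfillfull (.912), or backfillfull (.963)
+  # above full (.962), should raise OSD_OUT_OF_ORDER_FULL.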
+ ceph osd set-nearfull-ratio .913
+ ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
+ ceph health detail | grep OSD_OUT_OF_ORDER_FULL
+ ceph osd set-nearfull-ratio .892
+ ceph osd set-backfillfull-ratio .963
+ ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
+ ceph health detail | grep OSD_OUT_OF_ORDER_FULL
+ ceph osd set-backfillfull-ratio .912
+
+ # Check injected full results
+ $SUDO ceph tell osd.0 injectfull nearfull
+ wait_for_health "OSD_NEARFULL"
+ ceph health detail | grep "osd.0 is near full"
+ $SUDO ceph tell osd.0 injectfull none
+ wait_for_health_ok
+
+ $SUDO ceph tell osd.1 injectfull backfillfull
+ wait_for_health "OSD_BACKFILLFULL"
+ ceph health detail | grep "osd.1 is backfill full"
+ $SUDO ceph tell osd.1 injectfull none
+ wait_for_health_ok
+
+ $SUDO ceph tell osd.2 injectfull failsafe
+ # failsafe and full are the same as far as the monitor is concerned
+ wait_for_health "OSD_FULL"
+ ceph health detail | grep "osd.2 is full"
+ $SUDO ceph tell osd.2 injectfull none
+ wait_for_health_ok
+
+ $SUDO ceph tell osd.0 injectfull full
+ wait_for_health "OSD_FULL"
+ ceph health detail | grep "osd.0 is full"
+ $SUDO ceph tell osd.0 injectfull none
+ wait_for_health_ok
+
+ ceph pg stat | grep 'pgs:'
+ ceph pg 1.0 query
+ ceph tell 1.0 query
+ first=$(ceph mon dump -f json | jq -r '.mons[0].name')
+ ceph tell mon.$first quorum enter
+ ceph quorum_status
+ ceph report | grep osd_stats
+ ceph status
+ ceph -s
+
+ #
+ # tell osd version
+ #
+ ceph tell osd.0 version
+ expect_false ceph tell osd.9999 version
+ expect_false ceph tell osd.foo version
+
+ # back to pg stuff
+
+ ceph tell osd.0 dump_pg_recovery_stats | grep Started
+
+ ceph osd reweight 0 0.9
+ expect_false ceph osd reweight 0 -1
+ ceph osd reweight osd.0 1
+
+ ceph osd primary-affinity osd.0 .9
+ expect_false ceph osd primary-affinity osd.0 -2
+ expect_false ceph osd primary-affinity osd.9999 .5
+ ceph osd primary-affinity osd.0 1
+
+ ceph osd pool set rbd size 2
+ ceph osd pg-temp 1.0 0 1
+ ceph osd pg-temp 1.0 osd.1 osd.0
+ expect_false ceph osd pg-temp 1.0 0 1 2
+ expect_false ceph osd pg-temp asdf qwer
+ expect_false ceph osd pg-temp 1.0 asdf
+ ceph osd pg-temp 1.0 # cleanup pg-temp
+
+ ceph pg repeer 1.0
+ expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
+
+ # don't test ceph osd primary-temp for now
+}
+
+function test_mon_osd_pool_set()
+{
+ TEST_POOL_GETSET=pool_getset
+ expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
+ expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
+ ceph osd pool application enable $TEST_POOL_GETSET rados
+ ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
+ wait_for_clean
+ ceph osd pool get $TEST_POOL_GETSET all
+
+ for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
+ ceph osd pool get $TEST_POOL_GETSET $s
+ done
+
+ old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
+ (( new_size = old_size + 1 ))
+ ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
+ ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
+ ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
+
+ ceph osd pool create pool_erasure 1 1 erasure
+ ceph osd pool application enable pool_erasure rados
+ wait_for_clean
+ set +e
+ ceph osd pool set pool_erasure size 4444 2>$TMPFILE
+ check_response 'not change the size'
+ set -e
+ ceph osd pool get pool_erasure erasure_code_profile
+ ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
+
+ for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
+ ceph osd pool set $TEST_POOL_GETSET $flag false
+ ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
+ ceph osd pool set $TEST_POOL_GETSET $flag true
+ ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
+ ceph osd pool set $TEST_POOL_GETSET $flag 1
+ ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
+ ceph osd pool set $TEST_POOL_GETSET $flag 0
+ ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
+ expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
+ expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
+ done
+
+ ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
+ ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
+ ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
+ ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
+ ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
+ ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
+ ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
+ ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
+ ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
+ ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
+ ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
+ ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
+ ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
+ ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
+ ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
+ expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
+ expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
+
+ ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
+ ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
+ ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
+ ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
+ ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
+ ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
+ ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
+
+ expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
+ expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
+ expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
+ expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
+ ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
+
+ ceph osd pool set $TEST_POOL_GETSET nopgchange 1
+ expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
+ expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
+ ceph osd pool set $TEST_POOL_GETSET nopgchange 0
+ ceph osd pool set $TEST_POOL_GETSET pg_num 10
+ wait_for_clean
+ ceph osd pool set $TEST_POOL_GETSET pgp_num 10
+ expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
+ expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
+
+ old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
+ new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
+ ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
+ ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
+ wait_for_clean
+
+ ceph osd pool set $TEST_POOL_GETSET nosizechange 1
+ expect_false ceph osd pool set $TEST_POOL_GETSET size 2
+ expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
+ ceph osd pool set $TEST_POOL_GETSET nosizechange 0
+ ceph osd pool set $TEST_POOL_GETSET size 2
+ wait_for_clean
+ ceph osd pool set $TEST_POOL_GETSET min_size 2
+
+ expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
+ ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
+
+ expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
+ ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
+
+ ceph osd pool get rbd crush_rule | grep 'crush_rule: '
+
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
+ ceph osd pool set $TEST_POOL_GETSET compression_mode unset
+ ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
+ ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
+ ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
+ expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
+ expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
+ ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
+ ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
+ ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
+
+ ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
+ ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
+ ceph osd pool set $TEST_POOL_GETSET csum_type unset
+ ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
+
+ for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
+ ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
+ ceph osd pool set $TEST_POOL_GETSET $size 100
+ ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
+ ceph osd pool set $TEST_POOL_GETSET $size 0
+ ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
+ done
+
+ ceph osd pool set $TEST_POOL_GETSET nodelete 1
+ expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
+ ceph osd pool set $TEST_POOL_GETSET nodelete 0
+ ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
+
+}
+
+function test_mon_osd_tiered_pool_set()
+{
+ # this is really a tier pool
+ ceph osd pool create real-tier 2
+ ceph osd tier add rbd real-tier
+
+ # expect us to be unable to set negative values for hit_set_*
+ for o in hit_set_period hit_set_count hit_set_fpp; do
+    expect_false ceph osd pool set real-tier $o -1
+ done
+
+ # and hit_set_fpp should be in range 0..1
+  expect_false ceph osd pool set real-tier hit_set_fpp 2
+
+ ceph osd pool set real-tier hit_set_type explicit_hash
+ ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
+ ceph osd pool set real-tier hit_set_type explicit_object
+ ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
+ ceph osd pool set real-tier hit_set_type bloom
+ ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
+ expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
+ ceph osd pool set real-tier hit_set_period 123
+ ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
+ ceph osd pool set real-tier hit_set_count 12
+ ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
+ ceph osd pool set real-tier hit_set_fpp .01
+ ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
+
+ ceph osd pool set real-tier target_max_objects 123
+ ceph osd pool get real-tier target_max_objects | \
+ grep 'target_max_objects:[ \t]\+123'
+ ceph osd pool set real-tier target_max_bytes 123456
+ ceph osd pool get real-tier target_max_bytes | \
+ grep 'target_max_bytes:[ \t]\+123456'
+ ceph osd pool set real-tier cache_target_dirty_ratio .123
+ ceph osd pool get real-tier cache_target_dirty_ratio | \
+ grep 'cache_target_dirty_ratio:[ \t]\+0.123'
+ expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
+ expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
+ ceph osd pool set real-tier cache_target_dirty_high_ratio .123
+ ceph osd pool get real-tier cache_target_dirty_high_ratio | \
+ grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
+ expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
+ expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
+ ceph osd pool set real-tier cache_target_full_ratio .123
+ ceph osd pool get real-tier cache_target_full_ratio | \
+ grep 'cache_target_full_ratio:[ \t]\+0.123'
+ ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
+ ceph osd pool set real-tier cache_target_full_ratio 1.0
+ ceph osd pool set real-tier cache_target_full_ratio 0
+ expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
+ ceph osd pool set real-tier cache_min_flush_age 123
+ ceph osd pool get real-tier cache_min_flush_age | \
+ grep 'cache_min_flush_age:[ \t]\+123'
+ ceph osd pool set real-tier cache_min_evict_age 234
+ ceph osd pool get real-tier cache_min_evict_age | \
+ grep 'cache_min_evict_age:[ \t]\+234'
+
+ # iec vs si units
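+  # (object counts parse 1K as SI, i.e. 1000, while the byte-size options
+  # below treat both 1Ki and a bare 1M as base 2: 1024 and 1048576)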
+ ceph osd pool set real-tier target_max_objects 1K
+ ceph osd pool get real-tier target_max_objects | grep 1000
+ for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
+    ceph osd pool set real-tier $o 1Ki # with i suffix
+ val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
+ [[ $val == 1024 ]]
+    ceph osd pool set real-tier $o 1M # no i suffix, still parsed as base 2
+ val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
+ [[ $val == 1048576 ]]
+ done
+
+ # this is not a tier pool
+ ceph osd pool create fake-tier 2
+ ceph osd pool application enable fake-tier rados
+ wait_for_clean
+
+ expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
+ expect_false ceph osd pool get fake-tier hit_set_type
+ expect_false ceph osd pool set fake-tier hit_set_type explicit_object
+ expect_false ceph osd pool get fake-tier hit_set_type
+ expect_false ceph osd pool set fake-tier hit_set_type bloom
+ expect_false ceph osd pool get fake-tier hit_set_type
+ expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
+ expect_false ceph osd pool set fake-tier hit_set_period 123
+ expect_false ceph osd pool get fake-tier hit_set_period
+ expect_false ceph osd pool set fake-tier hit_set_count 12
+ expect_false ceph osd pool get fake-tier hit_set_count
+ expect_false ceph osd pool set fake-tier hit_set_fpp .01
+ expect_false ceph osd pool get fake-tier hit_set_fpp
+
+ expect_false ceph osd pool set fake-tier target_max_objects 123
+ expect_false ceph osd pool get fake-tier target_max_objects
+ expect_false ceph osd pool set fake-tier target_max_bytes 123456
+ expect_false ceph osd pool get fake-tier target_max_bytes
+ expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
+ expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
+ expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
+ expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
+ expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
+ expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
+ expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
+ expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
+ expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
+ expect_false ceph osd pool get fake-tier cache_target_full_ratio
+ expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
+ expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
+ expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
+ expect_false ceph osd pool set fake-tier cache_min_flush_age 123
+ expect_false ceph osd pool get fake-tier cache_min_flush_age
+ expect_false ceph osd pool set fake-tier cache_min_evict_age 234
+ expect_false ceph osd pool get fake-tier cache_min_evict_age
+
+ ceph osd tier remove rbd real-tier
+ ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
+ ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
+}
+
+function test_mon_osd_erasure_code()
+{
+
+ ceph osd erasure-code-profile set fooprofile a=b c=d
+ ceph osd erasure-code-profile set fooprofile a=b c=d
+ expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
+ ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
+ ceph osd erasure-code-profile set fooprofile a=b c=d e=f
+ expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
+  # make sure the legacy ruleset-* option names are rejected
+ expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
+ ceph osd erasure-code-profile set barprofile crush-failure-domain=host
+ # clean up
+ ceph osd erasure-code-profile rm fooprofile
+ ceph osd erasure-code-profile rm barprofile
+
+ # try weird k and m values
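+  # (k = data chunks, m = coding chunks; profiles with k < 2 or m < 1
+  # should be rejected, so only the k=2 m=1 profile below should succeed)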
+ expect_false ceph osd erasure-code-profile set badk k=1 m=1
+ expect_false ceph osd erasure-code-profile set badk k=1 m=2
+ expect_false ceph osd erasure-code-profile set badk k=0 m=2
+ expect_false ceph osd erasure-code-profile set badk k=-1 m=2
+ expect_false ceph osd erasure-code-profile set badm k=2 m=0
+ expect_false ceph osd erasure-code-profile set badm k=2 m=-1
+ ceph osd erasure-code-profile set good k=2 m=1
+ ceph osd erasure-code-profile rm good
+}
+
+function test_mon_osd_misc()
+{
+ set +e
+
+ # expect error about missing 'pool' argument
+ ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
+
+ # expect error about unused argument foo
+ ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
+
+  # expect an error because the overload percentage must be higher than 100
+ ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
+
+ set -e
+
+ local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
+ local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
+ # otherwise ceph-mgr complains like:
+ # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
+ # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
+ ceph config set mgr mon_reweight_min_bytes_per_osd 0
+ ceph config set mgr mon_reweight_min_pgs_per_osd 0
+ ceph osd reweight-by-utilization 110
+ ceph osd reweight-by-utilization 110 .5
+ expect_false ceph osd reweight-by-utilization 110 0
+ expect_false ceph osd reweight-by-utilization 110 -0.1
+ ceph osd test-reweight-by-utilization 110 .5 --no-increasing
+ ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
+ expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
+ expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
+ ceph osd reweight-by-pg 110
+ ceph osd test-reweight-by-pg 110 .5
+ ceph osd reweight-by-pg 110 rbd
+ ceph osd reweight-by-pg 110 .5 rbd
+ expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
+ # restore the setting
+ ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
+ ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
+}
+
+function test_admin_heap_profiler()
+{
+ do_test=1
+ set +e
+ # expect 'heap' commands to be correctly parsed
+ ceph tell osd.0 heap stats 2>$TMPFILE
+ if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
+ echo "tcmalloc not enabled; skip heap profiler test"
+ do_test=0
+ fi
+ set -e
+
+ [[ $do_test -eq 0 ]] && return 0
+
+ $SUDO ceph tell osd.0 heap start_profiler
+ $SUDO ceph tell osd.0 heap dump
+ $SUDO ceph tell osd.0 heap stop_profiler
+ $SUDO ceph tell osd.0 heap release
+}
+
+function test_osd_bench()
+{
+ # test osd bench limits
+  # As we should not rely on defaults (as they may change over time),
+  # let's inject some values and perform some simple tests
+  # max iops: 10 # 10 IOPS
+ # max throughput: 10485760 # 10MB/s
+ # max block size: 2097152 # 2MB
+ # duration: 10 # 10 seconds
+
+ local args="\
+ --osd-bench-duration 10 \
+ --osd-bench-max-block-size 2097152 \
+ --osd-bench-large-size-max-throughput 10485760 \
+ --osd-bench-small-size-max-iops 10"
+ ceph tell osd.0 injectargs ${args## }
+
+ # anything with a bs larger than 2097152 must fail
+ expect_false ceph tell osd.0 bench 1 2097153
+  # but exactly the configured max block size (2097152) must succeed
+ ceph tell osd.0 bench 1 2097152
+
+ # we assume 1MB as a large bs; anything lower is a small bs
+  # for a 4096-byte bs, over 10 seconds, we are limited by IOPS
+ # max count: 409600 (bytes)
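+  # (10 IOPS * 10 seconds * 4096 bytes = 409600 bytes)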
+
+ # more than max count must not be allowed
+ expect_false ceph tell osd.0 bench 409601 4096
+  # but exactly 409600 must succeed
+ ceph tell osd.0 bench 409600 4096
+
+ # for a large bs, we are limited by throughput.
+ # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
+ # the max count will be (10MB * 10s) = 100MB
+ # max count: 104857600 (bytes)
+
+ # more than max count must not be allowed
+ expect_false ceph tell osd.0 bench 104857601 2097152
+ # up to max count must be allowed
+ ceph tell osd.0 bench 104857600 2097152
+}
+
+function test_osd_negative_filestore_merge_threshold()
+{
+ $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
+ expect_config_value "osd.0" "filestore_merge_threshold" -1
+}
+
+function test_mon_tell()
+{
+ for m in mon.a mon.b; do
+ ceph tell $m sessions
+ ceph_watch_start debug audit
+    ceph tell $m sessions
+ ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
+ done
+ expect_false ceph tell mon.foo version
+}
+
+function test_mon_ping()
+{
+ ceph ping mon.a
+ ceph ping mon.b
+ expect_false ceph ping mon.foo
+
+ ceph ping mon.\*
+}
+
+function test_mon_deprecated_commands()
+{
+ # current DEPRECATED commands are marked with FLAG(DEPRECATED)
+ #
+ # Testing should be accomplished by setting
+ # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
+ # each one of these commands.
+
+ ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
+ expect_false ceph config-key list 2> $TMPFILE
+ check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
+
+ ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
+}
+
+function test_mon_cephdf_commands()
+{
+ # ceph df detail:
+ # pool section:
+  # RAW USED: the approximate raw space consumed per pool (stored bytes times the replication factor)
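+  # e.g. in a size-2 pool, a 4 KiB object should report stored ~ 4 KiB and
+  # stored_raw ~ 8 KiB, which is what the stored * 2 check below relies on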
+
+ ceph osd pool create cephdf_for_test 1 1 replicated
+ ceph osd pool application enable cephdf_for_test rados
+ ceph osd pool set cephdf_for_test size 2
+
+ dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
+ rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
+
+  # wait for the newly written object to become visible
+ for i in `seq 1 10`; do
+ rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
+ sleep 1
+ done
+  # "rados ls" goes straight to the OSDs, but "ceph df" is served by the
+  # mon, so we need to sync the mon with the OSDs
+ flush_pg_stats
+ local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
+ stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
+ stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
+
+ ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
+ rm ./cephdf_for_test
+
+ expect_false test $stored != $stored_raw
+}
+
+function test_mon_pool_application()
+{
+ ceph osd pool create app_for_test 16
+
+ ceph osd pool application enable app_for_test rbd
+ expect_false ceph osd pool application enable app_for_test rgw
+ ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
+ ceph osd pool ls detail | grep "application rbd,rgw"
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
+
+ expect_false ceph osd pool application set app_for_test cephfs key value
+ ceph osd pool application set app_for_test rbd key1 value1
+ ceph osd pool application set app_for_test rbd key2 value2
+ ceph osd pool application set app_for_test rgw key1 value1
+ ceph osd pool application get app_for_test rbd key1 | grep 'value1'
+ ceph osd pool application get app_for_test rbd key2 | grep 'value2'
+ ceph osd pool application get app_for_test rgw key1 | grep 'value1'
+
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
+
+ ceph osd pool application rm app_for_test rgw key1
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key2
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key1
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
+ ceph osd pool application rm app_for_test rbd key1 # should be idempotent
+
+ expect_false ceph osd pool application disable app_for_test rgw
+ ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
+ ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
+ ceph osd pool ls detail | grep "application rbd"
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
+
+ ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
+ ceph osd pool ls detail | grep -v "application "
+ ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
+
+ ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
+}
+
+function test_mon_tell_help_command()
+{
+ ceph tell mon.a help | grep sync_force
+ ceph tell mon.a -h | grep sync_force
+ ceph tell mon.a config -h | grep 'config diff get'
+
+ # wrong target
+ expect_false ceph tell mon.zzz help
+}
+
+function test_mon_stdin_stdout()
+{
+ echo foo | ceph config-key set test_key -i -
+ ceph config-key get test_key -o - | grep -c foo | grep -q 1
+}
+
+function test_osd_tell_help_command()
+{
+ ceph tell osd.1 help
+ expect_false ceph tell osd.100 help
+}
+
+function test_osd_compact()
+{
+ ceph tell osd.1 compact
+ $SUDO ceph daemon osd.1 compact
+}
+
+function test_mds_tell_help_command()
+{
+ local FS_NAME=cephfs
+ if ! mds_exists ; then
+ echo "Skipping test, no MDS found"
+ return
+ fi
+
+ remove_all_fs
+ ceph osd pool create fs_data 16
+ ceph osd pool create fs_metadata 16
+ ceph fs new $FS_NAME fs_metadata fs_data
+ wait_mds_active $FS_NAME
+
+
+ ceph tell mds.a help
+ expect_false ceph tell mds.z help
+
+ remove_all_fs
+ ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
+ ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
+}
+
+function test_mgr_tell()
+{
+ ceph tell mgr version
+}
+
+function test_mgr_devices()
+{
+ ceph device ls
+ expect_false ceph device info doesnotexist
+ expect_false ceph device get-health-metrics doesnotexist
+}
+
+function test_per_pool_scrub_status()
+{
+ ceph osd pool create noscrub_pool 16
+ ceph osd pool create noscrub_pool2 16
+ ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
+ expect_false grep -q "Pool .* has .*scrub.* flag"
+ ceph health detail | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_false grep -q "Pool .* has .*scrub.* flag"
+
+ ceph osd pool set noscrub_pool noscrub 1
+ ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
+
+ ceph osd pool set noscrub_pool nodeep-scrub 1
+ ceph osd pool set noscrub_pool2 nodeep-scrub 1
+ ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
+ ceph -s --format json | \
+ jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
+ expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
+ ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
+ expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
+ ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
+
+ ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
+ ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
+}
+
+#
+# New tests should be added to the TESTS array below
+#
+# Individual tests may be run using the '-t <testname>' argument
+# The user can specify '-t <testname>' as many times as she wants
+#
+# Tests will be run in the order presented in the TESTS array, or in
+# the order specified by the '-t <testname>' options.
+#
+# '-l' will list all the available test names
+# '-h' will show usage
+#
+# The script maintains backward compatibility: when no arguments are
+# specified, all tests run in the order they appear in the TESTS array.
+#
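+# Example invocations (assuming the usual cephtool workunit environment):
+#
+#   ./test.sh -l                        # list available test names
+#   ./test.sh -t mon_crush -t mon_ping  # run two specific tests
+#   ./test.sh --test-mon                # run everything in MON_TESTS
+#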
+
+set +x
+MON_TESTS+=" mon_injectargs"
+MON_TESTS+=" mon_injectargs_SI"
+for i in `seq 9`; do
+ MON_TESTS+=" tiering_$i";
+done
+MON_TESTS+=" auth"
+MON_TESTS+=" auth_profiles"
+MON_TESTS+=" mon_misc"
+MON_TESTS+=" mon_mon"
+MON_TESTS+=" mon_osd"
+MON_TESTS+=" mon_config_key"
+MON_TESTS+=" mon_crush"
+MON_TESTS+=" mon_osd_create_destroy"
+MON_TESTS+=" mon_osd_pool"
+MON_TESTS+=" mon_osd_pool_quota"
+MON_TESTS+=" mon_pg"
+MON_TESTS+=" mon_osd_pool_set"
+MON_TESTS+=" mon_osd_tiered_pool_set"
+MON_TESTS+=" mon_osd_erasure_code"
+MON_TESTS+=" mon_osd_misc"
+MON_TESTS+=" mon_tell"
+MON_TESTS+=" mon_ping"
+MON_TESTS+=" mon_deprecated_commands"
+MON_TESTS+=" mon_caps"
+MON_TESTS+=" mon_cephdf_commands"
+MON_TESTS+=" mon_tell_help_command"
+MON_TESTS+=" mon_stdin_stdout"
+
+OSD_TESTS+=" osd_bench"
+OSD_TESTS+=" osd_negative_filestore_merge_threshold"
+OSD_TESTS+=" tiering_agent"
+OSD_TESTS+=" admin_heap_profiler"
+OSD_TESTS+=" osd_tell_help_command"
+OSD_TESTS+=" osd_compact"
+OSD_TESTS+=" per_pool_scrub_status"
+
+MDS_TESTS+=" mds_tell"
+MDS_TESTS+=" mon_mds"
+MDS_TESTS+=" mon_mds_metadata"
+MDS_TESTS+=" mds_tell_help_command"
+
+MGR_TESTS+=" mgr_tell"
+MGR_TESTS+=" mgr_devices"
+
+TESTS+=$MON_TESTS
+TESTS+=$OSD_TESTS
+TESTS+=$MDS_TESTS
+TESTS+=$MGR_TESTS
+
+#
+# "main" follows
+#
+
+function list_tests()
+{
+ echo "AVAILABLE TESTS"
+ for i in $TESTS; do
+ echo " $i"
+ done
+}
+
+function usage()
+{
+ echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
+}
+
+tests_to_run=()
+
+sanity_check=true
+
+while [[ $# -gt 0 ]]; do
+ opt=$1
+
+ case "$opt" in
+ "-l" )
+ do_list=1
+ ;;
+ "--asok-does-not-need-root" )
+ SUDO=""
+ ;;
+ "--no-sanity-check" )
+ sanity_check=false
+ ;;
+ "--test-mon" )
+ tests_to_run+="$MON_TESTS"
+ ;;
+ "--test-osd" )
+ tests_to_run+="$OSD_TESTS"
+ ;;
+ "--test-mds" )
+ tests_to_run+="$MDS_TESTS"
+ ;;
+ "--test-mgr" )
+ tests_to_run+="$MGR_TESTS"
+ ;;
+ "-t" )
+ shift
+ if [[ -z "$1" ]]; then
+ echo "missing argument to '-t'"
+ usage ;
+ exit 1
+ fi
+ tests_to_run+=" $1"
+ ;;
+ "-h" )
+ usage ;
+ exit 0
+ ;;
+ esac
+ shift
+done
+
+if [[ $do_list -eq 1 ]]; then
+ list_tests ;
+ exit 0
+fi
+
+ceph osd pool create rbd 16
+
+if test -z "$tests_to_run" ; then
+ tests_to_run="$TESTS"
+fi
+
+if $sanity_check ; then
+ wait_no_osd_down
+fi
+for i in $tests_to_run; do
+ if $sanity_check ; then
+ check_no_osd_down
+ fi
+ set -x
+ test_${i}
+ set +x
+done
+if $sanity_check ; then
+ check_no_osd_down
+fi
+
+set -x
+
+echo OK
diff --git a/qa/workunits/cephtool/test_daemon.sh b/qa/workunits/cephtool/test_daemon.sh
new file mode 100755
index 000000000..08ae937cc
--- /dev/null
+++ b/qa/workunits/cephtool/test_daemon.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+set -ex
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+echo note: assuming mon.a is on the current host
+
+# can set to 'sudo ./ceph' to execute tests from current dir for development
+CEPH=${CEPH:-'sudo ceph'}
+
+${CEPH} daemon mon.a version | grep version
+
+# get debug_ms setting and strip it, painfully for reuse
+old_ms=$(${CEPH} daemon mon.a config get debug_ms | \
+ grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
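+# (config get emits JSON along the lines of { "debug_ms": "0/0" }; the sed
+# above strips everything but the value, e.g. 0/0)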
+${CEPH} daemon mon.a config set debug_ms 13
+new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
+ grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
+[ "$new_ms" = "13/13" ]
+${CEPH} daemon mon.a config set debug_ms $old_ms
+new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
+ grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
+[ "$new_ms" = "$old_ms" ]
+
+# unregistered/non-existent command
+expect_false ${CEPH} daemon mon.a bogus_command_blah foo
+
+set +e
+OUTPUT=$(${CEPH} -c /not/a/ceph.conf daemon mon.a help 2>&1)
+# look for EINVAL
+if [ $? != 22 ] ; then exit 1; fi
+if ! echo "$OUTPUT" | grep -q '.*open.*/not/a/ceph.conf'; then
+ echo "didn't find expected error in bad conf search"
+ exit 1
+fi
+set -e
+
+echo OK
diff --git a/qa/workunits/cephtool/test_kvstore_tool.sh b/qa/workunits/cephtool/test_kvstore_tool.sh
new file mode 100755
index 000000000..b7953dd21
--- /dev/null
+++ b/qa/workunits/cephtool/test_kvstore_tool.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+set -x
+
+source $(dirname $0)/../../standalone/ceph-helpers.sh
+
+set -e
+set -o functrace
+PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
+SUDO=${SUDO:-sudo}
+export CEPH_DEV=1
+
+echo note: test ceph_kvstore_tool with bluestore
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+TEMP_DIR=$(mktemp -d ./cephtool.XXX)
+trap "rm -fr $TEMP_DIR" 0
+
+TEMP_FILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
+
+function test_ceph_kvstore_tool()
+{
+ # create a data directory
+ ceph-objectstore-tool --data-path ${TEMP_DIR} --op mkfs --no-mon-config
+
+ # list
+ origin_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
+
+ # exists
+ prefix=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | head -n 1 | awk '{print $1}'`
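+  # (list output is assumed to be one "PREFIX KEY" pair per line, so awk
+  # grabs the prefix column of the first entry)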
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}
+ expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}notexist
+
+ # list-crc
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc ${prefix}
+
+ # list with prefix
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list ${prefix}
+
+ # set
+ echo "helloworld" >> ${TEMP_FILE}
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} set TESTPREFIX TESTKEY in ${TEMP_FILE}
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
+
+ # get
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} get TESTPREFIX TESTKEY out ${TEMP_FILE}.bak
+ diff ${TEMP_FILE} ${TEMP_FILE}.bak
+
+ # rm
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} rm TESTPREFIX TESTKEY
+ expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
+
+ # compact
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} compact
+
+ # destructive-repair
+ ceph-kvstore-tool bluestore-kv ${TEMP_DIR} destructive-repair
+
+ current_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
+ test ${origin_kv_nums} -eq ${current_kv_nums}
+}
+
+test_ceph_kvstore_tool
+
+echo OK
diff --git a/qa/workunits/client/test.sh b/qa/workunits/client/test.sh
new file mode 100755
index 000000000..12abd3a5d
--- /dev/null
+++ b/qa/workunits/client/test.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -ex
+
+ceph_test_client
diff --git a/qa/workunits/cls/test_cls_2pc_queue.sh b/qa/workunits/cls/test_cls_2pc_queue.sh
new file mode 100755
index 000000000..b4f68800f
--- /dev/null
+++ b/qa/workunits/cls/test_cls_2pc_queue.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_2pc_queue
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_cas.sh b/qa/workunits/cls/test_cls_cas.sh
new file mode 100755
index 000000000..765913482
--- /dev/null
+++ b/qa/workunits/cls/test_cls_cas.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+
+GTEST_FILTER=${CLS_CAS_GTEST_FILTER:-*}
+ceph_test_cls_cas --gtest_filter=${GTEST_FILTER}
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_cmpomap.sh b/qa/workunits/cls/test_cls_cmpomap.sh
new file mode 100755
index 000000000..af079f6e6
--- /dev/null
+++ b/qa/workunits/cls/test_cls_cmpomap.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_cmpomap
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_hello.sh b/qa/workunits/cls/test_cls_hello.sh
new file mode 100755
index 000000000..0a2e09620
--- /dev/null
+++ b/qa/workunits/cls/test_cls_hello.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_hello
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_journal.sh b/qa/workunits/cls/test_cls_journal.sh
new file mode 100755
index 000000000..9aa7450a9
--- /dev/null
+++ b/qa/workunits/cls/test_cls_journal.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+
+GTEST_FILTER=${CLS_JOURNAL_GTEST_FILTER:-*}
+ceph_test_cls_journal --gtest_filter=${GTEST_FILTER}
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_lock.sh b/qa/workunits/cls/test_cls_lock.sh
new file mode 100755
index 000000000..c14527053
--- /dev/null
+++ b/qa/workunits/cls/test_cls_lock.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_lock
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_log.sh b/qa/workunits/cls/test_cls_log.sh
new file mode 100755
index 000000000..523f985e7
--- /dev/null
+++ b/qa/workunits/cls/test_cls_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_log
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_numops.sh b/qa/workunits/cls/test_cls_numops.sh
new file mode 100755
index 000000000..dcbafcab2
--- /dev/null
+++ b/qa/workunits/cls/test_cls_numops.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_numops
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_rbd.sh b/qa/workunits/cls/test_cls_rbd.sh
new file mode 100755
index 000000000..fd4bec0f8
--- /dev/null
+++ b/qa/workunits/cls/test_cls_rbd.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+
+GTEST_FILTER=${CLS_RBD_GTEST_FILTER:-*}
+ceph_test_cls_rbd --gtest_filter=${GTEST_FILTER}
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_refcount.sh b/qa/workunits/cls/test_cls_refcount.sh
new file mode 100755
index 000000000..d722f5ad9
--- /dev/null
+++ b/qa/workunits/cls/test_cls_refcount.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_refcount
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_rgw.sh b/qa/workunits/cls/test_cls_rgw.sh
new file mode 100755
index 000000000..257338a05
--- /dev/null
+++ b/qa/workunits/cls/test_cls_rgw.sh
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+
+ceph_test_cls_rgw
+#ceph_test_cls_rgw_meta
+#ceph_test_cls_rgw_log
+#ceph_test_cls_rgw_opstate
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_rgw_gc.sh b/qa/workunits/cls/test_cls_rgw_gc.sh
new file mode 100755
index 000000000..0266438f8
--- /dev/null
+++ b/qa/workunits/cls/test_cls_rgw_gc.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_rgw_gc
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_rgw_stats.sh b/qa/workunits/cls/test_cls_rgw_stats.sh
new file mode 100755
index 000000000..e1b5bd6b9
--- /dev/null
+++ b/qa/workunits/cls/test_cls_rgw_stats.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_rgw_stats
+
+exit 0
diff --git a/qa/workunits/cls/test_cls_sdk.sh b/qa/workunits/cls/test_cls_sdk.sh
new file mode 100755
index 000000000..f1ccdc3b4
--- /dev/null
+++ b/qa/workunits/cls/test_cls_sdk.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_cls_sdk
+
+exit 0
diff --git a/qa/workunits/direct_io/.gitignore b/qa/workunits/direct_io/.gitignore
new file mode 100644
index 000000000..80f1fd1aa
--- /dev/null
+++ b/qa/workunits/direct_io/.gitignore
@@ -0,0 +1,3 @@
+/direct_io_test
+/test_sync_io
+/test_short_dio_read
diff --git a/qa/workunits/direct_io/Makefile b/qa/workunits/direct_io/Makefile
new file mode 100644
index 000000000..20fec0be5
--- /dev/null
+++ b/qa/workunits/direct_io/Makefile
@@ -0,0 +1,11 @@
+CFLAGS = -Wall -Wextra -D_GNU_SOURCE
+
+TARGETS = direct_io_test test_sync_io test_short_dio_read
+
+.c:
+ $(CC) $(CFLAGS) $@.c -o $@
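+
+# '.c:' above is a single-suffix rule: each name in TARGETS is built
+# directly from the matching .c source using that one recipe.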
+
+all: $(TARGETS)
+
+clean:
+ rm $(TARGETS)
diff --git a/qa/workunits/direct_io/big.sh b/qa/workunits/direct_io/big.sh
new file mode 100755
index 000000000..43bd6d72b
--- /dev/null
+++ b/qa/workunits/direct_io/big.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -ex
+
+echo "test large (16MB) dio write"
+dd if=/dev/zero of=foo.big bs=16M count=1 oflag=direct
+
+echo OK
diff --git a/qa/workunits/direct_io/direct_io_test.c b/qa/workunits/direct_io/direct_io_test.c
new file mode 100644
index 000000000..ccfbbb860
--- /dev/null
+++ b/qa/workunits/direct_io/direct_io_test.c
@@ -0,0 +1,312 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2011 New Dream Network
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+/*
+ * direct_io_test
+ *
+ * This test does some I/O using O_DIRECT.
+ *
+ * Semantics of O_DIRECT can be found at http://lwn.net/Articles/348739/
+ *
+ */
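+
+/*
+ * Note that O_DIRECT generally requires the user buffer, file offset, and
+ * transfer size to be suitably aligned; this test keeps all I/O page-sized
+ * and page-aligned (see the posix_memalign call below), which satisfies
+ * that requirement on common configurations.
+ */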
+
+static int g_num_pages = 100;
+
+static int g_duration = 10;
+
+struct chunk {
+ uint64_t offset;
+ uint64_t pad0;
+ uint64_t pad1;
+ uint64_t pad2;
+ uint64_t pad3;
+ uint64_t pad4;
+ uint64_t pad5;
+ uint64_t not_offset;
+} __attribute__((packed));
+
+static int page_size;
+
+static char temp_file[] = "direct_io_temp_file_XXXXXX";
+
+static int safe_write(int fd, const void *buf, signed int len)
+{
+ const char *b = (const char*)buf;
+ /* Handle EINTR and short writes */
+ while (1) {
+ int res = write(fd, b, len);
+		if (res < 0) {
+			int err = errno;
+			if (err != EINTR) {
+				return err;
+			}
+			/* interrupted before anything was written; retry
+			 * without adjusting len or the buffer pointer */
+			continue;
+		}
+ len -= res;
+ b += res;
+ if (len <= 0)
+ return 0;
+ }
+}
+
+static int do_read(int fd, char *buf, int buf_sz)
+{
+ /* We assume no short reads or EINTR. It's not really clear how
+ * those things interact with O_DIRECT. */
+ int ret = read(fd, buf, buf_sz);
+ if (ret < 0) {
+ int err = errno;
+ printf("do_read: error: %d (%s)\n", err, strerror(err));
+ return err;
+ }
+ if (ret != buf_sz) {
+ printf("do_read: short read\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int setup_temp_file(void)
+{
+ int fd;
+ int64_t num_chunks, i;
+
+ if (page_size % sizeof(struct chunk)) {
+		printf("setup_temp_file: page_size doesn't divide evenly "
+ "into data blocks.\n");
+ return -EINVAL;
+ }
+
+ fd = mkstemp(temp_file);
+ if (fd < 0) {
+ int err = errno;
+		printf("setup_temp_file: mkstemp failed with error %d\n", err);
+ return err;
+ }
+
+ num_chunks = g_num_pages * (page_size / sizeof(struct chunk));
+ for (i = 0; i < num_chunks; ++i) {
+ int ret;
+ struct chunk c;
+ memset(&c, 0, sizeof(c));
+ c.offset = i * sizeof(struct chunk);
+ c.pad0 = 0;
+ c.pad1 = 1;
+ c.pad2 = 2;
+ c.pad3 = 3;
+ c.pad4 = 4;
+ c.pad5 = 5;
+ c.not_offset = ~c.offset;
+ ret = safe_write(fd, &c, sizeof(struct chunk));
+ if (ret) {
+			printf("setup_temp_file: safe_write failed with "
+ "error: %d\n", ret);
+ TEMP_FAILURE_RETRY(close(fd));
+ unlink(temp_file);
+ return ret;
+ }
+ }
+ TEMP_FAILURE_RETRY(close(fd));
+ return 0;
+}
+
+static int verify_chunk(const struct chunk *c, uint64_t offset)
+{
+ if (c->offset != offset) {
+		printf("verify_chunk(%" PRId64 "): bad offset value (got: %"
+		       PRId64 ", expected: %" PRId64 ")\n", offset, c->offset, offset);
+ return EIO;
+ }
+ if (c->pad0 != 0) {
+ printf("verify_chunk(%" PRId64 "): bad pad0 value\n", offset);
+ return EIO;
+ }
+ if (c->pad1 != 1) {
+ printf("verify_chunk(%" PRId64 "): bad pad1 value\n", offset);
+ return EIO;
+ }
+ if (c->pad2 != 2) {
+ printf("verify_chunk(%" PRId64 "): bad pad2 value\n", offset);
+ return EIO;
+ }
+ if (c->pad3 != 3) {
+ printf("verify_chunk(%" PRId64 "): bad pad3 value\n", offset);
+ return EIO;
+ }
+ if (c->pad4 != 4) {
+ printf("verify_chunk(%" PRId64 "): bad pad4 value\n", offset);
+ return EIO;
+ }
+ if (c->pad5 != 5) {
+ printf("verify_chunk(%" PRId64 "): bad pad5 value\n", offset);
+ return EIO;
+ }
+ if (c->not_offset != ~offset) {
+ printf("verify_chunk(%" PRId64 "): bad not_offset value\n",
+ offset);
+ return EIO;
+ }
+ return 0;
+}
+
+static int do_o_direct_reads(void)
+{
+ int fd, ret;
+ unsigned int i;
+ void *buf = 0;
+ time_t cur_time, end_time;
+ ret = posix_memalign(&buf, page_size, page_size);
+ if (ret) {
+ printf("do_o_direct_reads: posix_memalign returned %d\n", ret);
+ goto done;
+ }
+
+ fd = open(temp_file, O_RDONLY | O_DIRECT);
+ if (fd < 0) {
+ ret = errno;
+ printf("do_o_direct_reads: error opening fd: %d\n", ret);
+ goto free_buf;
+ }
+
+ // read the first chunk and see if it looks OK
+ ret = do_read(fd, buf, page_size);
+ if (ret)
+ goto close_fd;
+ ret = verify_chunk((struct chunk*)buf, 0);
+ if (ret)
+ goto close_fd;
+
+ // read some random chunks and see how they look
+ cur_time = time(NULL);
+ end_time = cur_time + g_duration;
+ i = 0;
+ do {
+ time_t next_time;
+ uint64_t offset;
+ int page;
+ unsigned int seed;
+
+ seed = i++;
+ page = rand_r(&seed) % g_num_pages;
+ offset = page;
+ offset *= page_size;
+		if (lseek64(fd, offset, SEEK_SET) == -1) {
+			int err = errno;
+			printf("lseek64(%" PRId64 ") failed: error %d (%s)\n",
+			       offset, err, strerror(err));
+			ret = err; /* propagate the failure, not the last read's 0 */
+			goto close_fd;
+		}
+ ret = do_read(fd, buf, page_size);
+ if (ret)
+ goto close_fd;
+ ret = verify_chunk((struct chunk*)buf, offset);
+ if (ret)
+ goto close_fd;
+ next_time = time(NULL);
+ if (next_time > cur_time) {
+ printf(".");
+ }
+ cur_time = next_time;
+ } while (time(NULL) < end_time);
+
+ printf("\ndo_o_direct_reads: SUCCESS\n");
+close_fd:
+ TEMP_FAILURE_RETRY(close(fd));
+free_buf:
+ free(buf);
+done:
+ return ret;
+}
+
+static void usage(char *argv0)
+{
+ printf("%s: tests direct I/O\n", argv0);
+ printf("-d <seconds>: sets duration to <seconds>\n");
+ printf("-h: this help\n");
+ printf("-p <pages>: sets number of pages to allocate\n");
+}
+
+static void parse_args(int argc, char *argv[])
+{
+ int c;
+ while ((c = getopt (argc, argv, "d:hp:")) != -1) {
+ switch (c) {
+ case 'd':
+ g_duration = atoi(optarg);
+ if (g_duration <= 0) {
+ printf("tried to set invalid value of "
+				       "g_duration: %d\n", g_duration);
+ exit(1);
+ }
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(0);
+ break;
+ case 'p':
+ g_num_pages = atoi(optarg);
+ if (g_num_pages <= 0) {
+ printf("tried to set invalid value of "
+ "g_num_pages: %d\n", g_num_pages);
+ exit(1);
+ }
+ break;
+ case '?':
+ usage(argv[0]);
+ exit(1);
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ break;
+ }
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ parse_args(argc, argv);
+
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ page_size = getpagesize();
+
+ ret = setup_temp_file();
+ if (ret) {
+ printf("setup_temp_file failed with error %d\n", ret);
+ goto done;
+ }
+
+ ret = do_o_direct_reads();
+ if (ret) {
+ printf("do_o_direct_reads failed with error %d\n", ret);
+ goto unlink_temp_file;
+ }
+
+unlink_temp_file:
+ unlink(temp_file);
+done:
+ return ret;
+}
diff --git a/qa/workunits/direct_io/misc.sh b/qa/workunits/direct_io/misc.sh
new file mode 100755
index 000000000..6de080d2d
--- /dev/null
+++ b/qa/workunits/direct_io/misc.sh
@@ -0,0 +1,16 @@
+#!/bin/sh -ex
+
+# a few test cases from henry
+echo "test read from hole"
+dd if=/dev/zero of=dd3 bs=1 seek=1048576 count=0
+dd if=dd3 of=/tmp/ddout1 skip=8 bs=512 count=2 iflag=direct
+dd if=/dev/zero of=/tmp/dd3 bs=512 count=2
+cmp /tmp/dd3 /tmp/ddout1
+
+echo "other thing"
+dd if=/dev/urandom of=/tmp/dd10 bs=500 count=1
+dd if=/tmp/dd10 of=dd10 bs=512 seek=8388 count=1
+dd if=dd10 of=/tmp/dd10out bs=512 skip=8388 count=1 iflag=direct
+cmp /tmp/dd10 /tmp/dd10out
+
+echo OK
diff --git a/qa/workunits/direct_io/test_short_dio_read.c b/qa/workunits/direct_io/test_short_dio_read.c
new file mode 100644
index 000000000..502485557
--- /dev/null
+++ b/qa/workunits/direct_io/test_short_dio_read.c
@@ -0,0 +1,57 @@
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+
+int main()
+{
+ char buf[409600];
+ ssize_t r;
+ int err;
+ int fd = open("shortfile", O_WRONLY|O_CREAT, 0644);
+
+ if (fd < 0) {
+ err = errno;
+ printf("error: open() failed with: %d (%s)\n", err, strerror(err));
+ exit(err);
+ }
+
+ printf("writing first 3 bytes of 10k file\n");
+ r = write(fd, "foo", 3);
+ if (r == -1) {
+ err = errno;
+ printf("error: write() failed with: %d (%s)\n", err, strerror(err));
+ close(fd);
+ exit(err);
+ }
+ r = ftruncate(fd, 10000);
+ if (r == -1) {
+ err = errno;
+ printf("error: ftruncate() failed with: %d (%s)\n", err, strerror(err));
+ close(fd);
+ exit(err);
+ }
+
+ fsync(fd);
+ close(fd);
+
+ printf("reading O_DIRECT\n");
+ fd = open("shortfile", O_RDONLY|O_DIRECT);
+ if (fd < 0) {
+ err = errno;
+ printf("error: open() failed with: %d (%s)\n", err, strerror(err));
+ exit(err);
+ }
+
+ r = read(fd, buf, sizeof(buf));
+ close(fd);
+
+ printf("got %d\n", (int)r);
+ if (r != 10000)
+ return 1;
+ return 0;
+}
diff --git a/qa/workunits/direct_io/test_sync_io.c b/qa/workunits/direct_io/test_sync_io.c
new file mode 100644
index 000000000..f393fa6e8
--- /dev/null
+++ b/qa/workunits/direct_io/test_sync_io.c
@@ -0,0 +1,250 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <linux/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+
+//#include "../client/ioctl.h"
+
+#include <linux/ioctl.h>
+#define CEPH_IOCTL_MAGIC 0x97
+#define CEPH_IOC_SYNCIO _IO(CEPH_IOCTL_MAGIC, 5)
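+/* CEPH_IOC_SYNCIO (mirrored from the commented-out client/ioctl.h include
+ * above) asks the CephFS client for synchronous I/O that bypasses the page
+ * cache; the tests below use it as the non-O_DIRECT counterpart of each
+ * direct read/write. */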
+
+void write_pattern()
+{
+ printf("writing pattern\n");
+
+ uint64_t i;
+ int r;
+
+ int fd = open("foo", O_CREAT|O_WRONLY, 0644);
+ if (fd < 0) {
+ r = errno;
+ printf("write_pattern: error: open() failed with: %d (%s)\n", r, strerror(r));
+ exit(r);
+ }
+ for (i=0; i<1048576 * sizeof(i); i += sizeof(i)) {
+ r = write(fd, &i, sizeof(i));
+ if (r == -1) {
+ r = errno;
+ printf("write_pattern: error: write() failed with: %d (%s)\n", r, strerror(r));
+ break;
+ }
+ }
+
+ close(fd);
+}
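+
+/*
+ * The pattern is self-describing: each naturally aligned uint64_t in the
+ * file holds its own byte offset (in native byte order); for example, the
+ * eight bytes at offset 4096 decode to 4096. verify_pattern() can thus
+ * check any (buf, len, off) window in isolation.
+ */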
+
+int verify_pattern(char *buf, size_t len, uint64_t off)
+{
+ size_t i;
+
+ for (i = 0; i < len; i += sizeof(uint64_t)) {
+ uint64_t expected = i + off;
+ uint64_t actual = *(uint64_t*)(buf + i);
+ if (expected != actual) {
+ printf("error: offset %llu had %llu\n", (unsigned long long)expected,
+ (unsigned long long)actual);
+ exit(1);
+ }
+ }
+ return 0;
+}
+
+void generate_pattern(void *buf, size_t len, uint64_t offset)
+{
+ uint64_t *v = buf;
+ size_t i;
+
+ for (i=0; i<len / sizeof(v); i++)
+ v[i] = i * sizeof(v) + offset;
+ verify_pattern(buf, len, offset);
+}
+
+int read_file(int buf_align, uint64_t offset, int len, int direct) {
+
+ printf("read_file buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ void *rawbuf;
+ int r;
+ int flags;
+ int err = 0;
+
+ if(direct)
+ flags = O_RDONLY|O_DIRECT;
+ else
+ flags = O_RDONLY;
+
+ int fd = open("foo", flags);
+ if (fd < 0) {
+ err = errno;
+ printf("read_file: error: open() failed with: %d (%s)\n", err, strerror(err));
+ exit(err);
+ }
+
+ if (!direct)
+ ioctl(fd, CEPH_IOC_SYNCIO);
+
+ if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
+ printf("read_file: error: posix_memalign failed with %d", r);
+ close(fd);
+ exit (r);
+ }
+
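+ /*
+ * Offsetting into a page-aligned allocation deliberately misaligns the
+ * buffer by buf_align bytes (buf_align of 0 keeps it page-aligned), so
+ * the O_DIRECT path is exercised with both aligned and misaligned user
+ * memory.
+ */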
+ void *buf = (char *)rawbuf + buf_align;
+ memset(buf, 0, len);
+ r = pread(fd, buf, len, offset);
+ if (r == -1) {
+ err = errno;
+ printf("read_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
+ goto out;
+ }
+ r = verify_pattern(buf, len, offset);
+
+out:
+ close(fd);
+ free(rawbuf);
+ return r;
+}
+
+int read_direct(int buf_align, uint64_t offset, int len)
+{
+ printf("read_direct buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ return read_file(buf_align, offset, len, 1);
+}
+
+int read_sync(int buf_align, uint64_t offset, int len)
+{
+ printf("read_sync buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ return read_file(buf_align, offset, len, 0);
+}
+
+int write_file(int buf_align, uint64_t offset, int len, int direct)
+{
+ printf("write_file buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ void *rawbuf;
+ int r;
+ int err = 0;
+ int flags;
+ if (direct)
+ flags = O_WRONLY|O_DIRECT|O_CREAT;
+ else
+ flags = O_WRONLY|O_CREAT;
+
+ int fd = open("foo", flags, 0644);
+ if (fd < 0) {
+ err = errno;
+ printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
+ exit(err);
+ }
+
+ if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
+ printf("write_file: error: posix_memalign failed with %d", r);
+ err = r;
+ goto out_close;
+ }
+
+ if (!direct)
+ ioctl(fd, CEPH_IOC_SYNCIO);
+
+ void *buf = (char *)rawbuf + buf_align;
+
+ generate_pattern(buf, len, offset);
+
+ r = pwrite(fd, buf, len, offset);
+ close(fd);
+
+ fd = open("foo", O_RDONLY);
+ if (fd < 0) {
+ err = errno;
+ printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
+ free(rawbuf);
+ goto out_unlink;
+ }
+ void *buf2 = malloc(len);
+ if (!buf2) {
+ err = ENOMEM;
+ printf("write_file: error: malloc failed\n");
+ goto out_free;
+ }
+
+ memset(buf2, 0, len);
+ r = pread(fd, buf2, len, offset);
+ if (r == -1) {
+ err = errno;
+ printf("write_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
+ goto out_free_buf;
+ }
+ r = verify_pattern(buf2, len, offset);
+
+out_free_buf:
+ free(buf2);
+out_free:
+ free(rawbuf);
+out_close:
+ close(fd);
+out_unlink:
+ unlink("foo");
+ if (err)
+ exit(err);
+ return r;
+}
+
+int write_direct(int buf_align, uint64_t offset, int len)
+{
+ printf("write_direct buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ return write_file (buf_align, offset, len, 1);
+}
+
+int write_sync(int buf_align, uint64_t offset, int len)
+{
+ printf("write_sync buf_align %d offset %llu len %d\n", buf_align,
+ (unsigned long long)offset, len);
+ return write_file (buf_align, offset, len, 0);
+}
+
+int main(int argc, char **argv)
+{
+ uint64_t i, j, k;
+ int read = 1;
+ int write = 1;
+
+ if (argc >= 2 && strcmp(argv[1], "read") == 0)
+ write = 0;
+ if (argc >= 2 && strcmp(argv[1], "write") == 0)
+ read = 0;
+
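+ /*
+ * Sweep buffer alignment (0..4095 in 512-byte steps), file offset
+ * (a window straddling the 4 MiB mark) and I/O size (1K..16K),
+ * pushing every combination through both the O_DIRECT path and the
+ * SYNCIO-ioctl path, for reads here and for writes below.
+ */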
+ if (read) {
+ write_pattern();
+
+ for (i = 0; i < 4096; i += 512)
+ for (j = 4*1024*1024 - 4096; j < 4*1024*1024 + 4096; j += 512)
+ for (k = 1024; k <= 16384; k *= 2) {
+ read_direct(i, j, k);
+ read_sync(i, j, k);
+ }
+
+ }
+ unlink("foo");
+ if (write) {
+ for (i = 0; i < 4096; i += 512)
+ for (j = 4*1024*1024 - 4096 + 512; j < 4*1024*1024 + 4096; j += 512)
+ for (k = 1024; k <= 16384; k *= 2) {
+ write_direct(i, j, k);
+ write_sync(i, j, k);
+ }
+ }
+
+ return 0;
+}
diff --git a/qa/workunits/erasure-code/.gitignore b/qa/workunits/erasure-code/.gitignore
new file mode 100644
index 000000000..7e563b8b3
--- /dev/null
+++ b/qa/workunits/erasure-code/.gitignore
@@ -0,0 +1,2 @@
+*.log
+*.trs
diff --git a/qa/workunits/erasure-code/bench.html b/qa/workunits/erasure-code/bench.html
new file mode 100644
index 000000000..3b4b6c74c
--- /dev/null
+++ b/qa/workunits/erasure-code/bench.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" >
+<html>
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+ <title>Erasure Code Plugins Benchmarks</title>
+ <link href="examples.css" rel="stylesheet" type="text/css">
+ <script language="javascript" type="text/javascript" src="jquery.js"></script>
+ <script language="javascript" type="text/javascript" src="jquery.flot.js"></script>
+ <script language="javascript" type="text/javascript" src="jquery.flot.categories.js"></script>
+ <script language="javascript" type="text/javascript" src="bench.js"></script>
+ <script language="javascript" type="text/javascript" src="plot.js"></script>
+ </head>
+ <body>
+
+ <div id="header">
+ <h2>Erasure Code Plugins Benchmarks</h2>
+ </div>
+
+ <div id="content">
+
+ <div class="demo-container">
+ <div id="encode" class="demo-placeholder"></div>
+ </div>
+ <p>encode: Y = GB/s, X = K/M</p>
+
+ <div class="demo-container">
+ <div id="decode" class="demo-placeholder"></div>
+ </div>
+ <p>decode: Y = GB/s, X = K/M/erasures</p>
+
+ </div>
+
+ </body>
+</html>
diff --git a/qa/workunits/erasure-code/bench.sh b/qa/workunits/erasure-code/bench.sh
new file mode 100755
index 000000000..8e288f053
--- /dev/null
+++ b/qa/workunits/erasure-code/bench.sh
@@ -0,0 +1,192 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 Red Hat <contact@redhat.com>
+# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+# Test that it works from sources with:
+#
+# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
+# PLUGIN_DIRECTORY=build/lib \
+# qa/workunits/erasure-code/bench.sh fplot jerasure |
+# tee qa/workunits/erasure-code/bench.js
+#
+# This should start immediately and display:
+#
+# ...
+# [ '2/1', .48035538612887358583 ],
+# [ '3/2', .21648470405675016626 ],
+# etc.
+#
+# and complete within a few seconds. The result can then be displayed with:
+#
+# firefox qa/workunits/erasure-code/bench.html
+#
+# Once it is confirmed to work, it can be run with a more significant
+# volume of data so that the measures are more reliable:
+#
+# TOTAL_SIZE=$((4 * 1024 * 1024 * 1024)) \
+# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
+# PLUGIN_DIRECTORY=build/lib \
+# qa/workunits/erasure-code/bench.sh fplot jerasure |
+# tee qa/workunits/erasure-code/bench.js
+#
+set -e
+
+export PATH=/sbin:$PATH
+
+: ${VERBOSE:=false}
+: ${CEPH_ERASURE_CODE_BENCHMARK:=ceph_erasure_code_benchmark}
+: ${PLUGIN_DIRECTORY:=/usr/lib/ceph/erasure-code}
+: ${PLUGINS:=isa jerasure}
+: ${TECHNIQUES:=vandermonde cauchy}
+: ${TOTAL_SIZE:=$((1024 * 1024))}
+: ${SIZE:=4096}
+: ${PARAMETERS:=--parameter jerasure-per-chunk-alignment=true}
+
+function bench_header() {
+ echo -e "seconds\tKB\tplugin\tk\tm\twork.\titer.\tsize\teras.\tcommand."
+}
+
+function bench() {
+ local plugin=$1
+ shift
+ local k=$1
+ shift
+ local m=$1
+ shift
+ local workload=$1
+ shift
+ local iterations=$1
+ shift
+ local size=$1
+ shift
+ local erasures=$1
+ shift
+ command=$(echo $CEPH_ERASURE_CODE_BENCHMARK \
+ --plugin $plugin \
+ --workload $workload \
+ --iterations $iterations \
+ --size $size \
+ --erasures $erasures \
+ --parameter k=$k \
+ --parameter m=$m \
+ --erasure-code-dir $PLUGIN_DIRECTORY)
+ result=$($command "$@")
+ echo -e "$result\t$plugin\t$k\t$m\t$workload\t$iterations\t$size\t$erasures\t$command ""$@"
+}
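+
+# A minimal illustrative call of bench() above (bench_run below adds the
+# technique and alignment parameters as well):
+#
+#   bench jerasure 2 1 encode 256 4096 0 --parameter packetsize=256
+#
+# i.e. encode 256 iterations over 4096-byte objects with k=2, m=1.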
+
+function packetsize() {
+ local k=$1
+ local w=$2
+ local vector_wordsize=$3
+ local size=$4
+
+ local p=$(( ($size / $k / $w / $vector_wordsize ) * $vector_wordsize))
+ if [ $p -gt 3100 ] ; then
+ p=3100
+ fi
+ echo $p
+}
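+# Worked example for packetsize() above (integer arithmetic):
+# packetsize 2 8 16 4096 -> (4096 / 2 / 8 / 16) * 16 = 256;
+# any result above 3100 is clamped to 3100.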
+
+function bench_run() {
+ local plugin=jerasure
+ local w=8
+ local VECTOR_WORDSIZE=16
+ local ks="2 3 4 6 10"
+ declare -A k2ms
+ k2ms[2]="1"
+ k2ms[3]="2"
+ k2ms[4]="2 3"
+ k2ms[6]="2 3 4"
+ k2ms[10]="3 4"
+ local isa2technique_vandermonde='reed_sol_van'
+ local isa2technique_cauchy='cauchy'
+ local jerasure2technique_vandermonde='reed_sol_van'
+ local jerasure2technique_cauchy='cauchy_good'
+ for technique in ${TECHNIQUES} ; do
+ for plugin in ${PLUGINS} ; do
+ eval technique_parameter=\$${plugin}2technique_${technique}
+ echo "serie encode_${technique}_${plugin}"
+ for k in $ks ; do
+ for m in ${k2ms[$k]} ; do
+ bench $plugin $k $m encode $(($TOTAL_SIZE / $SIZE)) $SIZE 0 \
+ --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
+ ${PARAMETERS} \
+ --parameter technique=$technique_parameter
+
+ done
+ done
+ done
+ done
+ for technique in ${TECHNIQUES} ; do
+ for plugin in ${PLUGINS} ; do
+ eval technique_parameter=\$${plugin}2technique_${technique}
+ echo "serie decode_${technique}_${plugin}"
+ for k in $ks ; do
+ for m in ${k2ms[$k]} ; do
+ echo
+ for erasures in $(seq 1 $m) ; do
+ bench $plugin $k $m decode $(($TOTAL_SIZE / $SIZE)) $SIZE $erasures \
+ --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
+ ${PARAMETERS} \
+ --parameter technique=$technique_parameter
+ done
+ done
+ done
+ done
+ done
+}
+
+function fplot() {
+ local serie
+ bench_run | while read seconds total plugin k m workload iteration size erasures rest ; do
+ if [ -z "$seconds" ] ; then
+ echo null,
+ elif [ $seconds = serie ] ; then
+ if [ "$serie" ] ; then
+ echo '];'
+ fi
+ local serie=`echo $total | sed 's/cauchy_\([0-9]\)/cauchy_good_\1/g'`
+ echo "var $serie = ["
+ else
+ local x
+ if [ $workload = encode ] ; then
+ x=$k/$m
+ else
+ x=$k/$m/$erasures
+ fi
+ echo "[ '$x', " $(echo "( $total / 1024 / 1024 ) / $seconds" | bc -ql) " ], "
+ fi
+ done
+ echo '];'
+}
+
+function main() {
+ bench_header
+ bench_run
+}
+
+if [ "$1" = fplot ] ; then
+ "$@"
+else
+ main
+fi
+# Local Variables:
+# compile-command: "\
+# CEPH_ERASURE_CODE_BENCHMARK=../../../src/ceph_erasure_code_benchmark \
+# PLUGIN_DIRECTORY=../../../build/lib \
+# ./bench.sh
+# "
+# End:
diff --git a/qa/workunits/erasure-code/encode-decode-non-regression.sh b/qa/workunits/erasure-code/encode-decode-non-regression.sh
new file mode 100755
index 000000000..7f36c91c7
--- /dev/null
+++ b/qa/workunits/erasure-code/encode-decode-non-regression.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+set -ex
+
+: ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git}
+: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus}
+
+# when running from sources, the current directory must have precedence
+export PATH=:$PATH
+
+if ! test -d $DIRECTORY ; then
+ git clone $CORPUS $DIRECTORY
+fi
+
+my_version=v$(ceph --version | cut -f3 -d ' ')
+
+all_versions=$((ls -d $DIRECTORY/v* ; echo $DIRECTORY/$my_version ) | sort)
+
+for version in $all_versions ; do
+ if test -d $version ; then
+ $version/non-regression.sh
+ fi
+ if test $version = $DIRECTORY/$my_version ; then
+ break
+ fi
+done
diff --git a/qa/workunits/erasure-code/examples.css b/qa/workunits/erasure-code/examples.css
new file mode 100644
index 000000000..ee4724778
--- /dev/null
+++ b/qa/workunits/erasure-code/examples.css
@@ -0,0 +1,97 @@
+* { padding: 0; margin: 0; vertical-align: top; }
+
+body {
+ background: url(background.png) repeat-x;
+ font: 18px/1.5em "proxima-nova", Helvetica, Arial, sans-serif;
+}
+
+a { color: #069; }
+a:hover { color: #28b; }
+
+h2 {
+ margin-top: 15px;
+ font: normal 32px "omnes-pro", Helvetica, Arial, sans-serif;
+}
+
+h3 {
+ margin-left: 30px;
+ font: normal 26px "omnes-pro", Helvetica, Arial, sans-serif;
+ color: #666;
+}
+
+p {
+ margin-top: 10px;
+}
+
+button {
+ font-size: 18px;
+ padding: 1px 7px;
+}
+
+input {
+ font-size: 18px;
+}
+
+input[type=checkbox] {
+ margin: 7px;
+}
+
+#header {
+ position: relative;
+ width: 900px;
+ margin: auto;
+}
+
+#header h2 {
+ margin-left: 10px;
+ vertical-align: middle;
+ font-size: 42px;
+ font-weight: bold;
+ text-decoration: none;
+ color: #000;
+}
+
+#content {
+ width: 880px;
+ margin: 0 auto;
+ padding: 10px;
+}
+
+#footer {
+ margin-top: 25px;
+ margin-bottom: 10px;
+ text-align: center;
+ font-size: 12px;
+ color: #999;
+}
+
+.demo-container {
+ box-sizing: border-box;
+ width: 850px;
+ height: 450px;
+ padding: 20px 15px 15px 15px;
+ margin: 15px auto 30px auto;
+ border: 1px solid #ddd;
+ background: #fff;
+ background: linear-gradient(#f6f6f6 0, #fff 50px);
+ background: -o-linear-gradient(#f6f6f6 0, #fff 50px);
+ background: -ms-linear-gradient(#f6f6f6 0, #fff 50px);
+ background: -moz-linear-gradient(#f6f6f6 0, #fff 50px);
+ background: -webkit-linear-gradient(#f6f6f6 0, #fff 50px);
+ box-shadow: 0 3px 10px rgba(0,0,0,0.15);
+ -o-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
+ -ms-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
+ -moz-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
+ -webkit-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
+}
+
+.demo-placeholder {
+ width: 100%;
+ height: 100%;
+ font-size: 14px;
+ line-height: 1.2em;
+}
+
+.legend table {
+ border-spacing: 5px;
+} \ No newline at end of file
diff --git a/qa/workunits/erasure-code/jquery.flot.categories.js b/qa/workunits/erasure-code/jquery.flot.categories.js
new file mode 100644
index 000000000..2f9b25797
--- /dev/null
+++ b/qa/workunits/erasure-code/jquery.flot.categories.js
@@ -0,0 +1,190 @@
+/* Flot plugin for plotting textual data or categories.
+
+Copyright (c) 2007-2014 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+Consider a dataset like [["February", 34], ["March", 20], ...]. This plugin
+allows you to plot such a dataset directly.
+
+To enable it, you must specify mode: "categories" on the axis with the textual
+labels, e.g.
+
+ $.plot("#placeholder", data, { xaxis: { mode: "categories" } });
+
+By default, the labels are ordered as they are met in the data series. If you
+need a different ordering, you can specify "categories" on the axis options
+and list the categories there:
+
+ xaxis: {
+ mode: "categories",
+ categories: ["February", "March", "April"]
+ }
+
+If you need to customize the distances between the categories, you can specify
+"categories" as an object mapping labels to values
+
+ xaxis: {
+ mode: "categories",
+ categories: { "February": 1, "March": 3, "April": 4 }
+ }
+
+If you don't specify all categories, the remaining categories will be numbered
+from the max value plus 1 (with a spacing of 1 between each).
+
+Internally, the plugin works by transforming the input data through an auto-
+generated mapping where the first category becomes 0, the second 1, etc.
+Hence, a point like ["February", 34] becomes [0, 34] internally in Flot (this
+is visible in hover and click events that return numbers rather than the
+category labels). The plugin also overrides the tick generator to spit out the
+categories as ticks instead of the values.
+
+If you need to map a value back to its label, the mapping is always accessible
+as "categories" on the axis object, e.g. plot.getAxes().xaxis.categories.
+
+*/
+
+(function ($) {
+ var options = {
+ xaxis: {
+ categories: null
+ },
+ yaxis: {
+ categories: null
+ }
+ };
+
+ function processRawData(plot, series, data, datapoints) {
+ // if categories are enabled, we need to disable
+ // auto-transformation to numbers so the strings are intact
+ // for later processing
+
+ var xCategories = series.xaxis.options.mode == "categories",
+ yCategories = series.yaxis.options.mode == "categories";
+
+ if (!(xCategories || yCategories))
+ return;
+
+ var format = datapoints.format;
+
+ if (!format) {
+ // FIXME: auto-detection should really not be defined here
+ var s = series;
+ format = [];
+ format.push({ x: true, number: true, required: true });
+ format.push({ y: true, number: true, required: true });
+
+ if (s.bars.show || (s.lines.show && s.lines.fill)) {
+ var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
+ format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
+ if (s.bars.horizontal) {
+ delete format[format.length - 1].y;
+ format[format.length - 1].x = true;
+ }
+ }
+
+ datapoints.format = format;
+ }
+
+ for (var m = 0; m < format.length; ++m) {
+ if (format[m].x && xCategories)
+ format[m].number = false;
+
+ if (format[m].y && yCategories)
+ format[m].number = false;
+ }
+ }
+
+ function getNextIndex(categories) {
+ var index = -1;
+
+ for (var v in categories)
+ if (categories[v] > index)
+ index = categories[v];
+
+ return index + 1;
+ }
+
+ function categoriesTickGenerator(axis) {
+ var res = [];
+ for (var label in axis.categories) {
+ var v = axis.categories[label];
+ if (v >= axis.min && v <= axis.max)
+ res.push([v, label]);
+ }
+
+ res.sort(function (a, b) { return a[0] - b[0]; });
+
+ return res;
+ }
+
+ function setupCategoriesForAxis(series, axis, datapoints) {
+ if (series[axis].options.mode != "categories")
+ return;
+
+ if (!series[axis].categories) {
+ // parse options
+ var c = {}, o = series[axis].options.categories || {};
+ if ($.isArray(o)) {
+ for (var i = 0; i < o.length; ++i)
+ c[o[i]] = i;
+ }
+ else {
+ for (var v in o)
+ c[v] = o[v];
+ }
+
+ series[axis].categories = c;
+ }
+
+ // fix ticks
+ if (!series[axis].options.ticks)
+ series[axis].options.ticks = categoriesTickGenerator;
+
+ transformPointsOnAxis(datapoints, axis, series[axis].categories);
+ }
+
+ function transformPointsOnAxis(datapoints, axis, categories) {
+ // go through the points, transforming them
+ var points = datapoints.points,
+ ps = datapoints.pointsize,
+ format = datapoints.format,
+ formatColumn = axis.charAt(0),
+ index = getNextIndex(categories);
+
+ for (var i = 0; i < points.length; i += ps) {
+ if (points[i] == null)
+ continue;
+
+ for (var m = 0; m < ps; ++m) {
+ var val = points[i + m];
+
+ if (val == null || !format[m][formatColumn])
+ continue;
+
+ if (!(val in categories)) {
+ categories[val] = index;
+ ++index;
+ }
+
+ points[i + m] = categories[val];
+ }
+ }
+ }
+
+ function processDatapoints(plot, series, datapoints) {
+ setupCategoriesForAxis(series, "xaxis", datapoints);
+ setupCategoriesForAxis(series, "yaxis", datapoints);
+ }
+
+ function init(plot) {
+ plot.hooks.processRawData.push(processRawData);
+ plot.hooks.processDatapoints.push(processDatapoints);
+ }
+
+ $.plot.plugins.push({
+ init: init,
+ options: options,
+ name: 'categories',
+ version: '1.0'
+ });
+})(jQuery);
diff --git a/qa/workunits/erasure-code/jquery.flot.js b/qa/workunits/erasure-code/jquery.flot.js
new file mode 100644
index 000000000..39f3e4cf3
--- /dev/null
+++ b/qa/workunits/erasure-code/jquery.flot.js
@@ -0,0 +1,3168 @@
+/* Javascript plotting library for jQuery, version 0.8.3.
+
+Copyright (c) 2007-2014 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/
+
+// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ * var c = $.color.extract($("#mydiv"), 'background-color');
+ * console.log(c.r, c.g, c.b, c.a);
+ * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so e.g. parsing an empty string does
+ * produce a color rather than just crashing.
+ */
+(function($){$.color={};$.color.make=function(r,g,b,a){var o={};o.r=r||0;o.g=g||0;o.b=b||0;o.a=a!=null?a:1;o.add=function(c,d){for(var i=0;i<c.length;++i)o[c.charAt(i)]+=d;return o.normalize()};o.scale=function(c,f){for(var i=0;i<c.length;++i)o[c.charAt(i)]*=f;return o.normalize()};o.toString=function(){if(o.a>=1){return"rgb("+[o.r,o.g,o.b].join(",")+")"}else{return"rgba("+[o.r,o.g,o.b,o.a].join(",")+")"}};o.normalize=function(){function clamp(min,value,max){return value<min?min:value>max?max:value}o.r=clamp(0,parseInt(o.r),255);o.g=clamp(0,parseInt(o.g),255);o.b=clamp(0,parseInt(o.b),255);o.a=clamp(0,o.a,1);return o};o.clone=function(){return $.color.make(o.r,o.b,o.g,o.a)};return o.normalize()};$.color.extract=function(elem,css){var c;do{c=elem.css(css).toLowerCase();if(c!=""&&c!="transparent")break;elem=elem.parent()}while(elem.length&&!$.nodeName(elem.get(0),"body"));if(c=="rgba(0, 0, 0, 0)")c="transparent";return $.color.parse(c)};$.color.parse=function(str){var res,m=$.color.make;if(res=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10));if(res=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10),parseFloat(res[4]));if(res=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55);if(res=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55,parseFloat(res[4]));if(res=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str))return m(parseInt(res[1],16),parseInt(res[2],16),parseInt(res[3],16));if(res=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str))return m(parseInt(res[1]+res[1],16),parseInt(res[2]+res[2],16),parseInt(res[3]+res[3],16));var name=$.trim(str).toLowerCase();if(name=="transparent")return m(255,255,255,0);else{res=lookupColors[name]||[0,0,0];return m(res[0],res[1],res[2])}};var lookupColors={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery);
+
+// the actual Flot code
+(function($) {
+
+ // Cache the prototype hasOwnProperty for faster access
+
+ var hasOwnProperty = Object.prototype.hasOwnProperty;
+
+ // A shim to provide 'detach' to jQuery versions prior to 1.4. Using a DOM
+ // operation produces the same effect as detach, i.e. removing the element
+ // without touching its jQuery data.
+
+ // Do not merge this into Flot 0.9, since it requires jQuery 1.4.4+.
+
+ if (!$.fn.detach) {
+ $.fn.detach = function() {
+ return this.each(function() {
+ if (this.parentNode) {
+ this.parentNode.removeChild( this );
+ }
+ });
+ };
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The Canvas object is a wrapper around an HTML5 <canvas> tag.
+ //
+ // @constructor
+ // @param {string} cls List of classes to apply to the canvas.
+ // @param {element} container Element onto which to append the canvas.
+ //
+ // Requiring a container is a little iffy, but unfortunately canvas
+ // operations don't work unless the canvas is attached to the DOM.
+
+ function Canvas(cls, container) {
+
+ var element = container.children("." + cls)[0];
+
+ if (element == null) {
+
+ element = document.createElement("canvas");
+ element.className = cls;
+
+ $(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 })
+ .appendTo(container);
+
+ // If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas
+
+ if (!element.getContext) {
+ if (window.G_vmlCanvasManager) {
+ element = window.G_vmlCanvasManager.initElement(element);
+ } else {
+ throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");
+ }
+ }
+ }
+
+ this.element = element;
+
+ var context = this.context = element.getContext("2d");
+
+ // Determine the screen's ratio of physical to device-independent
+ // pixels. This is the ratio between the canvas width that the browser
+ // advertises and the number of pixels actually present in that space.
+
+ // The iPhone 4, for example, has a device-independent width of 320px,
+ // but its screen is actually 640px wide. It therefore has a pixel
+ // ratio of 2, while most normal devices have a ratio of 1.
+
+ var devicePixelRatio = window.devicePixelRatio || 1,
+ backingStoreRatio =
+ context.webkitBackingStorePixelRatio ||
+ context.mozBackingStorePixelRatio ||
+ context.msBackingStorePixelRatio ||
+ context.oBackingStorePixelRatio ||
+ context.backingStorePixelRatio || 1;
+
+ this.pixelRatio = devicePixelRatio / backingStoreRatio;
+
+ // Size the canvas to match the internal dimensions of its container
+
+ this.resize(container.width(), container.height());
+
+ // Collection of HTML div layers for text overlaid onto the canvas
+
+ this.textContainer = null;
+ this.text = {};
+
+ // Cache of text fragments and metrics, so we can avoid expensively
+ // re-calculating them when the plot is re-rendered in a loop.
+
+ this._textCache = {};
+ }
+
+ // Resizes the canvas to the given dimensions.
+ //
+ // @param {number} width New width of the canvas, in pixels.
+ // @param {number} height New height of the canvas, in pixels.
+
+ Canvas.prototype.resize = function(width, height) {
+
+ if (width <= 0 || height <= 0) {
+ throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height);
+ }
+
+ var element = this.element,
+ context = this.context,
+ pixelRatio = this.pixelRatio;
+
+ // Resize the canvas, increasing its density based on the display's
+ // pixel ratio; basically giving it more pixels without increasing the
+ // size of its element, to take advantage of the fact that retina
+ // displays have that many more pixels in the same advertised space.
+
+ // Resizing should reset the state (excanvas seems to be buggy though)
+
+ if (this.width != width) {
+ element.width = width * pixelRatio;
+ element.style.width = width + "px";
+ this.width = width;
+ }
+
+ if (this.height != height) {
+ element.height = height * pixelRatio;
+ element.style.height = height + "px";
+ this.height = height;
+ }
+
+ // Save the context, so we can reset in case we get replotted. The
+ // restore ensures that we're really back at the initial state, and
+ // should be safe even if we haven't saved the initial state yet.
+
+ context.restore();
+ context.save();
+
+ // Scale the coordinate space to match the display density; so even though we
+ // may have twice as many pixels, we still want lines and other drawing to
+ // appear at the same size; the extra pixels will just make them crisper.
+
+ context.scale(pixelRatio, pixelRatio);
+ };
+
+ // Clears the entire canvas area, not including any overlaid HTML text
+
+ Canvas.prototype.clear = function() {
+ this.context.clearRect(0, 0, this.width, this.height);
+ };
+
+ // Finishes rendering the canvas, including managing the text overlay.
+
+ Canvas.prototype.render = function() {
+
+ var cache = this._textCache;
+
+ // For each text layer, add elements marked as active that haven't
+ // already been rendered, and remove those that are no longer active.
+
+ for (var layerKey in cache) {
+ if (hasOwnProperty.call(cache, layerKey)) {
+
+ var layer = this.getTextLayer(layerKey),
+ layerCache = cache[layerKey];
+
+ layer.hide();
+
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+
+ var positions = styleCache[key].positions;
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.active) {
+ if (!position.rendered) {
+ layer.append(position.element);
+ position.rendered = true;
+ }
+ } else {
+ positions.splice(i--, 1);
+ if (position.rendered) {
+ position.element.detach();
+ }
+ }
+ }
+
+ if (positions.length == 0) {
+ delete styleCache[key];
+ }
+ }
+ }
+ }
+ }
+
+ layer.show();
+ }
+ }
+ };
+
+ // Creates (if necessary) and returns the text overlay container.
+ //
+ // @param {string} classes String of space-separated CSS classes used to
+ // uniquely identify the text layer.
+ // @return {object} The jQuery-wrapped text-layer div.
+
+ Canvas.prototype.getTextLayer = function(classes) {
+
+ var layer = this.text[classes];
+
+ // Create the text layer if it doesn't exist
+
+ if (layer == null) {
+
+ // Create the text layer container, if it doesn't exist
+
+ if (this.textContainer == null) {
+ this.textContainer = $("<div class='flot-text'></div>")
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0,
+ 'font-size': "smaller",
+ color: "#545454"
+ })
+ .insertAfter(this.element);
+ }
+
+ layer = this.text[classes] = $("<div></div>")
+ .addClass(classes)
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0
+ })
+ .appendTo(this.textContainer);
+ }
+
+ return layer;
+ };
+
+ // Creates (if necessary) and returns a text info object.
+ //
+ // The object looks like this:
+ //
+ // {
+ // width: Width of the text's wrapper div.
+ // height: Height of the text's wrapper div.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // positions: Array of positions at which this text is drawn.
+ // }
+ //
+ // The positions array contains objects that look like this:
+ //
+ // {
+ // active: Flag indicating whether the text should be visible.
+ // rendered: Flag indicating whether the text is currently visible.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // x: X coordinate at which to draw the text.
+ // y: Y coordinate at which to draw the text.
+ // }
+ //
+ // Each position after the first receives a clone of the original element.
+ //
+ // The idea is that the width, height, and general 'identity' of the
+ // text is constant no matter where it is placed; the placements are a
+ // secondary property.
+ //
+ // Canvas maintains a cache of recently-used text info objects; getTextInfo
+ // either returns the cached element or creates a new entry.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {string} text Text string to retrieve info for.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @return {object} a text info object.
+
+ Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) {
+
+ var textStyle, layerCache, styleCache, info;
+
+ // Cast the value to a string, in case we were given a number or such
+
+ text = "" + text;
+
+ // If the font is a font-spec object, generate a CSS font definition
+
+ if (typeof font === "object") {
+ textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family;
+ } else {
+ textStyle = font;
+ }
+
+ // Retrieve (or create) the cache for the text's layer and styles
+
+ layerCache = this._textCache[layer];
+
+ if (layerCache == null) {
+ layerCache = this._textCache[layer] = {};
+ }
+
+ styleCache = layerCache[textStyle];
+
+ if (styleCache == null) {
+ styleCache = layerCache[textStyle] = {};
+ }
+
+ info = styleCache[text];
+
+ // If we can't find a matching element in our cache, create a new one
+
+ if (info == null) {
+
+ var element = $("<div></div>").html(text)
+ .css({
+ position: "absolute",
+ 'max-width': width,
+ top: -9999
+ })
+ .appendTo(this.getTextLayer(layer));
+
+ if (typeof font === "object") {
+ element.css({
+ font: textStyle,
+ color: font.color
+ });
+ } else if (typeof font === "string") {
+ element.addClass(font);
+ }
+
+ info = styleCache[text] = {
+ width: element.outerWidth(true),
+ height: element.outerHeight(true),
+ element: element,
+ positions: []
+ };
+
+ element.detach();
+ }
+
+ return info;
+ };
+
+ // Adds a text string to the canvas text overlay.
+ //
+ // The text isn't drawn immediately; it is marked as rendering, which will
+ // result in its addition to the canvas on the next render pass.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number} x X coordinate at which to draw the text.
+ // @param {number} y Y coordinate at which to draw the text.
+ // @param {string} text Text string to draw.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @param {string=} halign Horizontal alignment of the text; either "left",
+ // "center" or "right".
+ // @param {string=} valign Vertical alignment of the text; either "top",
+ // "middle" or "bottom".
+
+ Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) {
+
+ var info = this.getTextInfo(layer, text, font, angle, width),
+ positions = info.positions;
+
+ // Tweak the div's position to match the text's alignment
+
+ if (halign == "center") {
+ x -= info.width / 2;
+ } else if (halign == "right") {
+ x -= info.width;
+ }
+
+ if (valign == "middle") {
+ y -= info.height / 2;
+ } else if (valign == "bottom") {
+ y -= info.height;
+ }
+
+ // Determine whether this text already exists at this position.
+ // If so, mark it for inclusion in the next render pass.
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = true;
+ return;
+ }
+ }
+
+ // If the text doesn't exist at this position, create a new entry
+
+ // For the very first position we'll re-use the original element,
+ // while for subsequent ones we'll clone it.
+
+ position = {
+ active: true,
+ rendered: false,
+ element: positions.length ? info.element.clone() : info.element,
+ x: x,
+ y: y
+ };
+
+ positions.push(position);
+
+ // Move the element to its final position within the container
+
+ position.element.css({
+ top: Math.round(y),
+ left: Math.round(x),
+ 'text-align': halign // In case the text wraps
+ });
+ };
+
+ // Removes one or more text strings from the canvas text overlay.
+ //
+ // If no parameters are given, all text within the layer is removed.
+ //
+ // Note that the text is not immediately removed; it is simply marked as
+ // inactive, which will result in its removal on the next render pass.
+ // This avoids the performance penalty for 'clear and redraw' behavior,
+ // where we potentially get rid of all text on a layer, but will likely
+ // add back most or all of it later, as when redrawing axes, for example.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number=} x X coordinate of the text.
+ // @param {number=} y Y coordinate of the text.
+ // @param {string=} text Text string to remove.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which the text is rotated, in degrees.
+ // Angle is currently unused, it will be implemented in the future.
+
+ Canvas.prototype.removeText = function(layer, x, y, text, font, angle) {
+ if (text == null) {
+ var layerCache = this._textCache[layer];
+ if (layerCache != null) {
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+ var positions = styleCache[key].positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ position.active = false;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ var positions = this.getTextInfo(layer, text, font, angle).positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = false;
+ }
+ }
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The top-level container for the entire plot.
+
+ function Plot(placeholder, data_, options_, plugins) {
+ // data is on the form:
+ // [ series1, series2 ... ]
+ // where series is either just the data as [ [x1, y1], [x2, y2], ... ]
+ // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... }
+
+ var series = [],
+ options = {
+ // the color theme used for graphs
+ colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"],
+ legend: {
+ show: true,
+ noColumns: 1, // number of columns in legend table
+ labelFormatter: null, // fn: string -> string
+ labelBoxBorderColor: "#ccc", // border color for the little label boxes
+ container: null, // container (as jQuery object) to put legend in, null means default on top of graph
+ position: "ne", // position of default legend container within plot
+ margin: 5, // distance from grid edge to default legend container within plot
+ backgroundColor: null, // null means auto-detect
+ backgroundOpacity: 0.85, // set to 0 to avoid background
+ sorted: null // default to no legend sorting
+ },
+ xaxis: {
+ show: null, // null = auto-detect, true = always, false = never
+ position: "bottom", // or "top"
+ mode: null, // null or "time"
+ font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" }
+ color: null, // base color, labels, ticks
+ tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)"
+ transform: null, // null or f: number -> number to transform axis
+ inverseTransform: null, // if transform is set, this should be the inverse function
+ min: null, // min. value to show, null means set automatically
+ max: null, // max. value to show, null means set automatically
+ autoscaleMargin: null, // margin in % to add if auto-setting min/max
+ ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. number of ticks for auto-ticks
+ tickFormatter: null, // fn: number -> string
+ labelWidth: null, // size of tick labels in pixels
+ labelHeight: null,
+ reserveSpace: null, // whether to reserve space even if axis isn't shown
+ tickLength: null, // size in pixels of ticks, or "full" for whole line
+ alignTicksWithAxis: null, // axis number or null for no sync
+ tickDecimals: null, // no. of decimals, null means auto
+ tickSize: null, // number or [number, "unit"]
+ minTickSize: null // number or [number, "unit"]
+ },
+ yaxis: {
+ autoscaleMargin: 0.02,
+ position: "left" // or "right"
+ },
+ xaxes: [],
+ yaxes: [],
+ series: {
+ points: {
+ show: false,
+ radius: 3,
+ lineWidth: 2, // in pixels
+ fill: true,
+ fillColor: "#ffffff",
+ symbol: "circle" // or callback
+ },
+ lines: {
+ // we don't put in show: false so we can see
+ // whether lines were actively disabled
+ lineWidth: 2, // in pixels
+ fill: false,
+ fillColor: null,
+ steps: false
+ // Omit 'zero', so we can later default its value to
+ // match that of the 'fill' option.
+ },
+ bars: {
+ show: false,
+ lineWidth: 2, // in pixels
+ barWidth: 1, // in units of the x axis
+ fill: true,
+ fillColor: null,
+ align: "left", // "left", "right", or "center"
+ horizontal: false,
+ zero: true
+ },
+ shadowSize: 3,
+ highlightColor: null
+ },
+ grid: {
+ show: true,
+ aboveData: false,
+ color: "#545454", // primary color used for outline and labels
+ backgroundColor: null, // null for transparent, else color
+ borderColor: null, // set if different from the grid color
+ tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)"
+ margin: 0, // distance from the canvas edge to the grid
+ labelMargin: 5, // in pixels
+ axisMargin: 8, // in pixels
+ borderWidth: 2, // in pixels
+ minBorderMargin: null, // in pixels, null means taken from points radius
+ markings: null, // array of ranges or fn: axes -> array of ranges
+ markingsColor: "#f4f4f4",
+ markingsLineWidth: 2,
+ // interactive stuff
+ clickable: false,
+ hoverable: false,
+ autoHighlight: true, // highlight in case mouse is near
+ mouseActiveRadius: 10 // how far the mouse can be away to activate an item
+ },
+ interaction: {
+ redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow
+ },
+ hooks: {}
+ },
+ surface = null, // the canvas for the plot itself
+ overlay = null, // canvas for interactive stuff on top of plot
+ eventHolder = null, // jQuery object that events should be bound to
+ ctx = null, octx = null,
+ xaxes = [], yaxes = [],
+ plotOffset = { left: 0, right: 0, top: 0, bottom: 0},
+ plotWidth = 0, plotHeight = 0,
+ hooks = {
+ processOptions: [],
+ processRawData: [],
+ processDatapoints: [],
+ processOffset: [],
+ drawBackground: [],
+ drawSeries: [],
+ draw: [],
+ bindEvents: [],
+ drawOverlay: [],
+ shutdown: []
+ },
+ plot = this;
+
+ // public functions
+ plot.setData = setData;
+ plot.setupGrid = setupGrid;
+ plot.draw = draw;
+ plot.getPlaceholder = function() { return placeholder; };
+ plot.getCanvas = function() { return surface.element; };
+ plot.getPlotOffset = function() { return plotOffset; };
+ plot.width = function () { return plotWidth; };
+ plot.height = function () { return plotHeight; };
+ plot.offset = function () {
+ var o = eventHolder.offset();
+ o.left += plotOffset.left;
+ o.top += plotOffset.top;
+ return o;
+ };
+ plot.getData = function () { return series; };
+ plot.getAxes = function () {
+ var res = {}, i;
+ $.each(xaxes.concat(yaxes), function (_, axis) {
+ if (axis)
+ res[axis.direction + (axis.n != 1 ? axis.n : "") + "axis"] = axis;
+ });
+ return res;
+ };
+ plot.getXAxes = function () { return xaxes; };
+ plot.getYAxes = function () { return yaxes; };
+ plot.c2p = canvasToAxisCoords;
+ plot.p2c = axisToCanvasCoords;
+ plot.getOptions = function () { return options; };
+ plot.highlight = highlight;
+ plot.unhighlight = unhighlight;
+ plot.triggerRedrawOverlay = triggerRedrawOverlay;
+ plot.pointOffset = function(point) {
+ return {
+ left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10),
+ top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10)
+ };
+ };
+ plot.shutdown = shutdown;
+ plot.destroy = function () {
+ shutdown();
+ placeholder.removeData("plot").empty();
+
+ series = [];
+ options = null;
+ surface = null;
+ overlay = null;
+ eventHolder = null;
+ ctx = null;
+ octx = null;
+ xaxes = [];
+ yaxes = [];
+ hooks = null;
+ highlights = [];
+ plot = null;
+ };
+ plot.resize = function () {
+ var width = placeholder.width(),
+ height = placeholder.height();
+ surface.resize(width, height);
+ overlay.resize(width, height);
+ };
+
+ // public attributes
+ plot.hooks = hooks;
+
+ // initialize
+ initPlugins(plot);
+ parseOptions(options_);
+ setupCanvases();
+ setData(data_);
+ setupGrid();
+ draw();
+ bindEvents();
+
+
+ function executeHooks(hook, args) {
+ args = [plot].concat(args);
+ for (var i = 0; i < hook.length; ++i)
+ hook[i].apply(this, args);
+ }
+
+ function initPlugins() {
+
+ // References to key classes, allowing plugins to modify them
+
+ var classes = {
+ Canvas: Canvas
+ };
+
+ for (var i = 0; i < plugins.length; ++i) {
+ var p = plugins[i];
+ p.init(plot, classes);
+ if (p.options)
+ $.extend(true, options, p.options);
+ }
+ }
+
+ function parseOptions(opts) {
+
+ $.extend(true, options, opts);
+
+ // $.extend merges arrays, rather than replacing them. When fewer
+ // colors are provided than the default palette contains, we
+ // end up with those colors plus the remaining defaults, which is
+ // not expected behavior; avoid it by replacing them here.
+
+ if (opts && opts.colors) {
+ options.colors = opts.colors;
+ }
+
+ if (options.xaxis.color == null)
+ options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+ if (options.yaxis.color == null)
+ options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+
+ if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility
+ options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color;
+ if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility
+ options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color;
+
+ if (options.grid.borderColor == null)
+ options.grid.borderColor = options.grid.color;
+ if (options.grid.tickColor == null)
+ options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+
+ // Fill in defaults for axis options, including any unspecified
+ // font-spec fields, if a font-spec was provided.
+
+ // If no x/y axis options were provided, create one of each anyway,
+ // since the rest of the code assumes that they exist.
+
+ var i, axisOptions, axisCount,
+ fontSize = placeholder.css("font-size"),
+ fontSizeDefault = fontSize ? +fontSize.replace("px", "") : 13,
+ fontDefaults = {
+ style: placeholder.css("font-style"),
+ size: Math.round(0.8 * fontSizeDefault),
+ variant: placeholder.css("font-variant"),
+ weight: placeholder.css("font-weight"),
+ family: placeholder.css("font-family")
+ };
+
+ axisCount = options.xaxes.length || 1;
+ for (i = 0; i < axisCount; ++i) {
+
+ axisOptions = options.xaxes[i];
+ if (axisOptions && !axisOptions.tickColor) {
+ axisOptions.tickColor = axisOptions.color;
+ }
+
+ axisOptions = $.extend(true, {}, options.xaxis, axisOptions);
+ options.xaxes[i] = axisOptions;
+
+ if (axisOptions.font) {
+ axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
+ if (!axisOptions.font.color) {
+ axisOptions.font.color = axisOptions.color;
+ }
+ if (!axisOptions.font.lineHeight) {
+ axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
+ }
+ }
+ }
+
+ axisCount = options.yaxes.length || 1;
+ for (i = 0; i < axisCount; ++i) {
+
+ axisOptions = options.yaxes[i];
+ if (axisOptions && !axisOptions.tickColor) {
+ axisOptions.tickColor = axisOptions.color;
+ }
+
+ axisOptions = $.extend(true, {}, options.yaxis, axisOptions);
+ options.yaxes[i] = axisOptions;
+
+ if (axisOptions.font) {
+ axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
+ if (!axisOptions.font.color) {
+ axisOptions.font.color = axisOptions.color;
+ }
+ if (!axisOptions.font.lineHeight) {
+ axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
+ }
+ }
+ }
+
+ // backwards compatibility, to be removed in future
+ if (options.xaxis.noTicks && options.xaxis.ticks == null)
+ options.xaxis.ticks = options.xaxis.noTicks;
+ if (options.yaxis.noTicks && options.yaxis.ticks == null)
+ options.yaxis.ticks = options.yaxis.noTicks;
+ if (options.x2axis) {
+ options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis);
+ options.xaxes[1].position = "top";
+ // Override the inherit to allow the axis to auto-scale
+ if (options.x2axis.min == null) {
+ options.xaxes[1].min = null;
+ }
+ if (options.x2axis.max == null) {
+ options.xaxes[1].max = null;
+ }
+ }
+ if (options.y2axis) {
+ options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis);
+ options.yaxes[1].position = "right";
+ // Override the inherit to allow the axis to auto-scale
+ if (options.y2axis.min == null) {
+ options.yaxes[1].min = null;
+ }
+ if (options.y2axis.max == null) {
+ options.yaxes[1].max = null;
+ }
+ }
+ if (options.grid.coloredAreas)
+ options.grid.markings = options.grid.coloredAreas;
+ if (options.grid.coloredAreasColor)
+ options.grid.markingsColor = options.grid.coloredAreasColor;
+ if (options.lines)
+ $.extend(true, options.series.lines, options.lines);
+ if (options.points)
+ $.extend(true, options.series.points, options.points);
+ if (options.bars)
+ $.extend(true, options.series.bars, options.bars);
+ if (options.shadowSize != null)
+ options.series.shadowSize = options.shadowSize;
+ if (options.highlightColor != null)
+ options.series.highlightColor = options.highlightColor;
+
+ // save options on axes for future reference
+ for (i = 0; i < options.xaxes.length; ++i)
+ getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i];
+ for (i = 0; i < options.yaxes.length; ++i)
+ getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i];
+
+ // add hooks from options
+ for (var n in hooks)
+ if (options.hooks[n] && options.hooks[n].length)
+ hooks[n] = hooks[n].concat(options.hooks[n]);
+
+ executeHooks(hooks.processOptions, [options]);
+ }
+
+ function setData(d) {
+ series = parseData(d);
+ fillInSeriesOptions();
+ processData();
+ }
+
+ function parseData(d) {
+ var res = [];
+ for (var i = 0; i < d.length; ++i) {
+ var s = $.extend(true, {}, options.series);
+
+ if (d[i].data != null) {
+ s.data = d[i].data; // move the data instead of deep-copy
+ delete d[i].data;
+
+ $.extend(true, s, d[i]);
+
+ d[i].data = s.data;
+ }
+ else
+ s.data = d[i];
+ res.push(s);
+ }
+
+ return res;
+ }
+
+ function axisNumber(obj, coord) {
+ var a = obj[coord + "axis"];
+ if (typeof a == "object") // if we got a real axis, extract number
+ a = a.n;
+ if (typeof a != "number")
+ a = 1; // default to first axis
+ return a;
+ }
+
+ function allAxes() {
+ // return flat array without annoying null entries
+ return $.grep(xaxes.concat(yaxes), function (a) { return a; });
+ }
+
+ function canvasToAxisCoords(pos) {
+ // return an object with x/y corresponding to all used axes
+ var res = {}, i, axis;
+ for (i = 0; i < xaxes.length; ++i) {
+ axis = xaxes[i];
+ if (axis && axis.used)
+ res["x" + axis.n] = axis.c2p(pos.left);
+ }
+
+ for (i = 0; i < yaxes.length; ++i) {
+ axis = yaxes[i];
+ if (axis && axis.used)
+ res["y" + axis.n] = axis.c2p(pos.top);
+ }
+
+ if (res.x1 !== undefined)
+ res.x = res.x1;
+ if (res.y1 !== undefined)
+ res.y = res.y1;
+
+ return res;
+ }
+
+ function axisToCanvasCoords(pos) {
+ // get canvas coords from the first pair of x/y found in pos
+ var res = {}, i, axis, key;
+
+ for (i = 0; i < xaxes.length; ++i) {
+ axis = xaxes[i];
+ if (axis && axis.used) {
+ key = "x" + axis.n;
+ if (pos[key] == null && axis.n == 1)
+ key = "x";
+
+ if (pos[key] != null) {
+ res.left = axis.p2c(pos[key]);
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < yaxes.length; ++i) {
+ axis = yaxes[i];
+ if (axis && axis.used) {
+ key = "y" + axis.n;
+ if (pos[key] == null && axis.n == 1)
+ key = "y";
+
+ if (pos[key] != null) {
+ res.top = axis.p2c(pos[key]);
+ break;
+ }
+ }
+ }
+
+ return res;
+ }
+
+ function getOrCreateAxis(axes, number) {
+ if (!axes[number - 1])
+ axes[number - 1] = {
+ n: number, // save the number for future reference
+ direction: axes == xaxes ? "x" : "y",
+ options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis)
+ };
+
+ return axes[number - 1];
+ }
+
+ function fillInSeriesOptions() {
+
+ var neededColors = series.length, maxIndex = -1, i;
+
+ // Subtract the number of series that already have fixed colors or
+ // color indexes from the number that we still need to generate.
+
+ for (i = 0; i < series.length; ++i) {
+ var sc = series[i].color;
+ if (sc != null) {
+ neededColors--;
+ if (typeof sc == "number" && sc > maxIndex) {
+ maxIndex = sc;
+ }
+ }
+ }
+
+ // If any of the series have fixed color indexes, then we need to
+ // generate at least as many colors as the highest index.
+
+ if (neededColors <= maxIndex) {
+ neededColors = maxIndex + 1;
+ }
+
+ // Generate all the colors, using first the option colors and then
+ // variations on those colors once they're exhausted.
+
+ var c, colors = [], colorPool = options.colors,
+ colorPoolSize = colorPool.length, variation = 0;
+
+ for (i = 0; i < neededColors; i++) {
+
+ c = $.color.parse(colorPool[i % colorPoolSize] || "#666");
+
+ // Each time we exhaust the colors in the pool we adjust
+ // a scaling factor used to produce more variations on
+ // those colors. The factor alternates negative/positive
+ // to produce lighter/darker colors.
+
+ // Reset the variation after every few cycles, or else
+ // it will end up producing only white or black colors.
+
+ if (i % colorPoolSize == 0 && i) {
+ if (variation >= 0) {
+ if (variation < 0.5) {
+ variation = -variation - 0.2;
+ } else variation = 0;
+ } else variation = -variation;
+ }
+
+ colors[i] = c.scale('rgb', 1 + variation);
+ }
+
+ // Finalize the series options, filling in their colors
+
+ var colori = 0, s;
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ // assign colors
+ if (s.color == null) {
+ s.color = colors[colori].toString();
+ ++colori;
+ }
+ else if (typeof s.color == "number")
+ s.color = colors[s.color].toString();
+
+ // turn on lines automatically in case nothing is set
+ if (s.lines.show == null) {
+ var v, show = true;
+ for (v in s)
+ if (s[v] && s[v].show) {
+ show = false;
+ break;
+ }
+ if (show)
+ s.lines.show = true;
+ }
+
+ // If nothing was provided for lines.zero, default it to match
+ // lines.fill, since areas by default should extend to zero.
+
+ if (s.lines.zero == null) {
+ s.lines.zero = !!s.lines.fill;
+ }
+
+ // setup axes
+ s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x"));
+ s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y"));
+ }
+ }
+
+ function processData() {
+ var topSentry = Number.POSITIVE_INFINITY,
+ bottomSentry = Number.NEGATIVE_INFINITY,
+ fakeInfinity = Number.MAX_VALUE,
+ i, j, k, m, length,
+ s, points, ps, x, y, axis, val, f, p,
+ data, format;
+
+ function updateAxis(axis, min, max) {
+ if (min < axis.datamin && min != -fakeInfinity)
+ axis.datamin = min;
+ if (max > axis.datamax && max != fakeInfinity)
+ axis.datamax = max;
+ }
+
+ $.each(allAxes(), function (_, axis) {
+ // init axis
+ axis.datamin = topSentry;
+ axis.datamax = bottomSentry;
+ axis.used = false;
+ });
+
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+ s.datapoints = { points: [] };
+
+ executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]);
+ }
+
+ // first pass: clean and copy data
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ data = s.data;
+ format = s.datapoints.format;
+
+ if (!format) {
+ format = [];
+ // find out how to copy
+ format.push({ x: true, number: true, required: true });
+ format.push({ y: true, number: true, required: true });
+
+ if (s.bars.show || (s.lines.show && s.lines.fill)) {
+ var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
+ format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
+ if (s.bars.horizontal) {
+ delete format[format.length - 1].y;
+ format[format.length - 1].x = true;
+ }
+ }
+
+ s.datapoints.format = format;
+ }
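+            // e.g. a vertical bar series ends up with a three-slot format, so
+            // the flat points array holds [x, y, bottom, x, y, bottom, ...]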
+
+ if (s.datapoints.pointsize != null)
+ continue; // already filled in
+
+ s.datapoints.pointsize = format.length;
+
+ ps = s.datapoints.pointsize;
+ points = s.datapoints.points;
+
+ var insertSteps = s.lines.show && s.lines.steps;
+ s.xaxis.used = s.yaxis.used = true;
+
+ for (j = k = 0; j < data.length; ++j, k += ps) {
+ p = data[j];
+
+ var nullify = p == null;
+ if (!nullify) {
+ for (m = 0; m < ps; ++m) {
+ val = p[m];
+ f = format[m];
+
+ if (f) {
+ if (f.number && val != null) {
+ val = +val; // convert to number
+ if (isNaN(val))
+ val = null;
+ else if (val == Infinity)
+ val = fakeInfinity;
+ else if (val == -Infinity)
+ val = -fakeInfinity;
+ }
+
+ if (val == null) {
+ if (f.required)
+ nullify = true;
+
+ if (f.defaultValue != null)
+ val = f.defaultValue;
+ }
+ }
+
+ points[k + m] = val;
+ }
+ }
+
+ if (nullify) {
+ for (m = 0; m < ps; ++m) {
+ val = points[k + m];
+ if (val != null) {
+ f = format[m];
+ // extract min/max info
+ if (f.autoscale !== false) {
+ if (f.x) {
+ updateAxis(s.xaxis, val, val);
+ }
+ if (f.y) {
+ updateAxis(s.yaxis, val, val);
+ }
+ }
+ }
+ points[k + m] = null;
+ }
+ }
+ else {
+                    // a little bit of line-specific stuff that
+                    // perhaps shouldn't be here, but lacking a
+                    // better approach...
+ if (insertSteps && k > 0
+ && points[k - ps] != null
+ && points[k - ps] != points[k]
+ && points[k - ps + 1] != points[k + 1]) {
+ // copy the point to make room for a middle point
+ for (m = 0; m < ps; ++m)
+ points[k + ps + m] = points[k + m];
+
+ // middle point has same y
+ points[k + 1] = points[k - ps + 1];
+
+ // we've added a point, better reflect that
+ k += ps;
+ }
+ }
+ }
+ }
+
+ // give the hooks a chance to run
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ executeHooks(hooks.processDatapoints, [ s, s.datapoints]);
+ }
+
+ // second pass: find datamax/datamin for auto-scaling
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+ points = s.datapoints.points;
+ ps = s.datapoints.pointsize;
+ format = s.datapoints.format;
+
+ var xmin = topSentry, ymin = topSentry,
+ xmax = bottomSentry, ymax = bottomSentry;
+
+ for (j = 0; j < points.length; j += ps) {
+ if (points[j] == null)
+ continue;
+
+ for (m = 0; m < ps; ++m) {
+ val = points[j + m];
+ f = format[m];
+ if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity)
+ continue;
+
+ if (f.x) {
+ if (val < xmin)
+ xmin = val;
+ if (val > xmax)
+ xmax = val;
+ }
+ if (f.y) {
+ if (val < ymin)
+ ymin = val;
+ if (val > ymax)
+ ymax = val;
+ }
+ }
+ }
+
+ if (s.bars.show) {
+                // make sure we've got room for the bar on the dancing floor
+ var delta;
+
+ switch (s.bars.align) {
+ case "left":
+ delta = 0;
+ break;
+ case "right":
+ delta = -s.bars.barWidth;
+ break;
+ default:
+ delta = -s.bars.barWidth / 2;
+ }
+
+ if (s.bars.horizontal) {
+ ymin += delta;
+ ymax += delta + s.bars.barWidth;
+ }
+ else {
+ xmin += delta;
+ xmax += delta + s.bars.barWidth;
+ }
+ }
+
+ updateAxis(s.xaxis, xmin, xmax);
+ updateAxis(s.yaxis, ymin, ymax);
+ }
+
+ $.each(allAxes(), function (_, axis) {
+ if (axis.datamin == topSentry)
+ axis.datamin = null;
+ if (axis.datamax == bottomSentry)
+ axis.datamax = null;
+ });
+ }
+
+ function setupCanvases() {
+
+ // Make sure the placeholder is clear of everything except canvases
+ // from a previous plot in this container that we'll try to re-use.
+
+ placeholder.css("padding", 0) // padding messes up the positioning
+ .children().filter(function(){
+ return !$(this).hasClass("flot-overlay") && !$(this).hasClass('flot-base');
+ }).remove();
+
+ if (placeholder.css("position") == 'static')
+ placeholder.css("position", "relative"); // for positioning labels and overlay
+
+ surface = new Canvas("flot-base", placeholder);
+ overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features
+
+ ctx = surface.context;
+ octx = overlay.context;
+
+ // define which element we're listening for events on
+ eventHolder = $(overlay.element).unbind();
+
+ // If we're re-using a plot object, shut down the old one
+
+ var existing = placeholder.data("plot");
+
+ if (existing) {
+ existing.shutdown();
+ overlay.clear();
+ }
+
+ // save in case we get replotted
+ placeholder.data("plot", plot);
+ }
+
+ function bindEvents() {
+ // bind events
+ if (options.grid.hoverable) {
+ eventHolder.mousemove(onMouseMove);
+
+ // Use bind, rather than .mouseleave, because we officially
+ // still support jQuery 1.2.6, which doesn't define a shortcut
+ // for mouseenter or mouseleave. This was a bug/oversight that
+ // was fixed somewhere around 1.3.x. We can return to using
+ // .mouseleave when we drop support for 1.2.6.
+
+ eventHolder.bind("mouseleave", onMouseLeave);
+ }
+
+ if (options.grid.clickable)
+ eventHolder.click(onClick);
+
+ executeHooks(hooks.bindEvents, [eventHolder]);
+ }
+
+ function shutdown() {
+ if (redrawTimeout)
+ clearTimeout(redrawTimeout);
+
+ eventHolder.unbind("mousemove", onMouseMove);
+ eventHolder.unbind("mouseleave", onMouseLeave);
+ eventHolder.unbind("click", onClick);
+
+ executeHooks(hooks.shutdown, [eventHolder]);
+ }
+
+ function setTransformationHelpers(axis) {
+ // set helper functions on the axis, assumes plot area
+ // has been computed already
+
+ function identity(x) { return x; }
+
+ var s, m, t = axis.options.transform || identity,
+ it = axis.options.inverseTransform;
+
+ // precompute how much the axis is scaling a point
+ // in canvas space
+ if (axis.direction == "x") {
+ s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min));
+ m = Math.min(t(axis.max), t(axis.min));
+ }
+ else {
+ s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min));
+ s = -s;
+ m = Math.max(t(axis.max), t(axis.min));
+ }
+
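+        // e.g. an x axis with min 0, max 10 and plotWidth 500 gives s = 50
+        // and m = 0, so p2c(2) === 100 canvas pixels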
+ // data point to canvas coordinate
+ if (t == identity) // slight optimization
+ axis.p2c = function (p) { return (p - m) * s; };
+ else
+ axis.p2c = function (p) { return (t(p) - m) * s; };
+ // canvas coordinate to data point
+ if (!it)
+ axis.c2p = function (c) { return m + c / s; };
+ else
+ axis.c2p = function (c) { return it(m + c / s); };
+ }
+
+ function measureTickLabels(axis) {
+
+ var opts = axis.options,
+ ticks = axis.ticks || [],
+ labelWidth = opts.labelWidth || 0,
+ labelHeight = opts.labelHeight || 0,
+ maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null),
+ legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
+ layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
+ font = opts.font || "flot-tick-label tickLabel";
+
+ for (var i = 0; i < ticks.length; ++i) {
+
+ var t = ticks[i];
+
+ if (!t.label)
+ continue;
+
+ var info = surface.getTextInfo(layer, t.label, font, null, maxWidth);
+
+ labelWidth = Math.max(labelWidth, info.width);
+ labelHeight = Math.max(labelHeight, info.height);
+ }
+
+ axis.labelWidth = opts.labelWidth || labelWidth;
+ axis.labelHeight = opts.labelHeight || labelHeight;
+ }
+
+ function allocateAxisBoxFirstPhase(axis) {
+ // find the bounding box of the axis by looking at label
+ // widths/heights and ticks, make room by diminishing the
+ // plotOffset; this first phase only looks at one
+        // dimension per axis; the other dimension depends on the
+        // other axes, so it will have to wait
+
+ var lw = axis.labelWidth,
+ lh = axis.labelHeight,
+ pos = axis.options.position,
+ isXAxis = axis.direction === "x",
+ tickLength = axis.options.tickLength,
+ axisMargin = options.grid.axisMargin,
+ padding = options.grid.labelMargin,
+ innermost = true,
+ outermost = true,
+ first = true,
+ found = false;
+
+ // Determine the axis's position in its direction and on its side
+
+ $.each(isXAxis ? xaxes : yaxes, function(i, a) {
+ if (a && (a.show || a.reserveSpace)) {
+ if (a === axis) {
+ found = true;
+ } else if (a.options.position === pos) {
+ if (found) {
+ outermost = false;
+ } else {
+ innermost = false;
+ }
+ }
+ if (!found) {
+ first = false;
+ }
+ }
+ });
+
+ // The outermost axis on each side has no margin
+
+ if (outermost) {
+ axisMargin = 0;
+ }
+
+ // The ticks for the first axis in each direction stretch across
+
+ if (tickLength == null) {
+ tickLength = first ? "full" : 5;
+ }
+
+ if (!isNaN(+tickLength))
+ padding += +tickLength;
+
+ if (isXAxis) {
+ lh += padding;
+
+ if (pos == "bottom") {
+ plotOffset.bottom += lh + axisMargin;
+ axis.box = { top: surface.height - plotOffset.bottom, height: lh };
+ }
+ else {
+ axis.box = { top: plotOffset.top + axisMargin, height: lh };
+ plotOffset.top += lh + axisMargin;
+ }
+ }
+ else {
+ lw += padding;
+
+ if (pos == "left") {
+ axis.box = { left: plotOffset.left + axisMargin, width: lw };
+ plotOffset.left += lw + axisMargin;
+ }
+ else {
+ plotOffset.right += lw + axisMargin;
+ axis.box = { left: surface.width - plotOffset.right, width: lw };
+ }
+ }
+
+ // save for future reference
+ axis.position = pos;
+ axis.tickLength = tickLength;
+ axis.box.padding = padding;
+ axis.innermost = innermost;
+ }
+
+ function allocateAxisBoxSecondPhase(axis) {
+ // now that all axis boxes have been placed in one
+ // dimension, we can set the remaining dimension coordinates
+ if (axis.direction == "x") {
+ axis.box.left = plotOffset.left - axis.labelWidth / 2;
+ axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth;
+ }
+ else {
+ axis.box.top = plotOffset.top - axis.labelHeight / 2;
+ axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight;
+ }
+ }
+
+ function adjustLayoutForThingsStickingOut() {
+ // possibly adjust plot offset to ensure everything stays
+ // inside the canvas and isn't clipped off
+
+ var minMargin = options.grid.minBorderMargin,
+ axis, i;
+
+ // check stuff from the plot (FIXME: this should just read
+ // a value from the series, otherwise it's impossible to
+ // customize)
+ if (minMargin == null) {
+ minMargin = 0;
+ for (i = 0; i < series.length; ++i)
+ minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2));
+ }
+
+ var margins = {
+ left: minMargin,
+ right: minMargin,
+ top: minMargin,
+ bottom: minMargin
+ };
+
+ // check axis labels, note we don't check the actual
+ // labels but instead use the overall width/height to not
+ // jump as much around with replots
+ $.each(allAxes(), function (_, axis) {
+ if (axis.reserveSpace && axis.ticks && axis.ticks.length) {
+ if (axis.direction === "x") {
+ margins.left = Math.max(margins.left, axis.labelWidth / 2);
+ margins.right = Math.max(margins.right, axis.labelWidth / 2);
+ } else {
+ margins.bottom = Math.max(margins.bottom, axis.labelHeight / 2);
+ margins.top = Math.max(margins.top, axis.labelHeight / 2);
+ }
+ }
+ });
+
+ plotOffset.left = Math.ceil(Math.max(margins.left, plotOffset.left));
+ plotOffset.right = Math.ceil(Math.max(margins.right, plotOffset.right));
+ plotOffset.top = Math.ceil(Math.max(margins.top, plotOffset.top));
+ plotOffset.bottom = Math.ceil(Math.max(margins.bottom, plotOffset.bottom));
+ }
+
+ function setupGrid() {
+ var i, axes = allAxes(), showGrid = options.grid.show;
+
+ // Initialize the plot's offset from the edge of the canvas
+
+ for (var a in plotOffset) {
+ var margin = options.grid.margin || 0;
+ plotOffset[a] = typeof margin == "number" ? margin : margin[a] || 0;
+ }
+
+ executeHooks(hooks.processOffset, [plotOffset]);
+
+ // If the grid is visible, add its border width to the offset
+
+ for (var a in plotOffset) {
+ if(typeof(options.grid.borderWidth) == "object") {
+ plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0;
+ }
+ else {
+ plotOffset[a] += showGrid ? options.grid.borderWidth : 0;
+ }
+ }
+
+ $.each(axes, function (_, axis) {
+ var axisOpts = axis.options;
+ axis.show = axisOpts.show == null ? axis.used : axisOpts.show;
+ axis.reserveSpace = axisOpts.reserveSpace == null ? axis.show : axisOpts.reserveSpace;
+ setRange(axis);
+ });
+
+ if (showGrid) {
+
+ var allocatedAxes = $.grep(axes, function (axis) {
+ return axis.show || axis.reserveSpace;
+ });
+
+ $.each(allocatedAxes, function (_, axis) {
+ // make the ticks
+ setupTickGeneration(axis);
+ setTicks(axis);
+ snapRangeToTicks(axis, axis.ticks);
+ // find labelWidth/Height for axis
+ measureTickLabels(axis);
+ });
+
+ // with all dimensions calculated, we can compute the
+            // axis bounding boxes, starting from the outside
+            // (in reverse order)
+ for (i = allocatedAxes.length - 1; i >= 0; --i)
+ allocateAxisBoxFirstPhase(allocatedAxes[i]);
+
+ // make sure we've got enough space for things that
+ // might stick out
+ adjustLayoutForThingsStickingOut();
+
+ $.each(allocatedAxes, function (_, axis) {
+ allocateAxisBoxSecondPhase(axis);
+ });
+ }
+
+ plotWidth = surface.width - plotOffset.left - plotOffset.right;
+ plotHeight = surface.height - plotOffset.bottom - plotOffset.top;
+
+        // now that we've got the proper plot dimensions, we can compute the scaling
+ $.each(axes, function (_, axis) {
+ setTransformationHelpers(axis);
+ });
+
+ if (showGrid) {
+ drawAxisLabels();
+ }
+
+ insertLegend();
+ }
+
+ function setRange(axis) {
+ var opts = axis.options,
+ min = +(opts.min != null ? opts.min : axis.datamin),
+ max = +(opts.max != null ? opts.max : axis.datamax),
+ delta = max - min;
+
+ if (delta == 0.0) {
+ // degenerate case
+ var widen = max == 0 ? 1 : 0.01;
+
+ if (opts.min == null)
+ min -= widen;
+ // always widen max if we couldn't widen min to ensure we
+ // don't fall into min == max which doesn't work
+ if (opts.max == null || opts.min != null)
+ max += widen;
+ }
+ else {
+ // consider autoscaling
+ var margin = opts.autoscaleMargin;
+ if (margin != null) {
+ if (opts.min == null) {
+ min -= delta * margin;
+ // make sure we don't go below zero if all values
+ // are positive
+ if (min < 0 && axis.datamin != null && axis.datamin >= 0)
+ min = 0;
+ }
+ if (opts.max == null) {
+ max += delta * margin;
+ if (max > 0 && axis.datamax != null && axis.datamax <= 0)
+ max = 0;
+ }
+ }
+ }
+ axis.min = min;
+ axis.max = max;
+ }
+
+ function setupTickGeneration(axis) {
+ var opts = axis.options;
+
+ // estimate number of ticks
+ var noTicks;
+ if (typeof opts.ticks == "number" && opts.ticks > 0)
+ noTicks = opts.ticks;
+ else
+ // heuristic based on the model a*sqrt(x) fitted to
+ // some data points that seemed reasonable
+ noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? surface.width : surface.height);
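+        // (e.g. a 500px-wide canvas yields roughly 0.3 * sqrt(500), i.e. about 7 ticks)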
+
+ var delta = (axis.max - axis.min) / noTicks,
+ dec = -Math.floor(Math.log(delta) / Math.LN10),
+ maxDec = opts.tickDecimals;
+
+ if (maxDec != null && dec > maxDec) {
+ dec = maxDec;
+ }
+
+ var magn = Math.pow(10, -dec),
+ norm = delta / magn, // norm is between 1.0 and 10.0
+ size;
+
+ if (norm < 1.5) {
+ size = 1;
+ } else if (norm < 3) {
+ size = 2;
+ // special case for 2.5, requires an extra decimal
+ if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) {
+ size = 2.5;
+ ++dec;
+ }
+ } else if (norm < 7.5) {
+ size = 5;
+ } else {
+ size = 10;
+ }
+
+ size *= magn;
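+        // (e.g. delta 23 gives dec -1 and magn 10; norm 2.3 triggers the 2.5
+        // special case, so size becomes 25 and dec is bumped to 0)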
+
+ if (opts.minTickSize != null && size < opts.minTickSize) {
+ size = opts.minTickSize;
+ }
+
+ axis.delta = delta;
+ axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec);
+ axis.tickSize = opts.tickSize || size;
+
+ // Time mode was moved to a plug-in in 0.8, and since so many people use it
+ // we'll add an especially friendly reminder to make sure they included it.
+
+ if (opts.mode == "time" && !axis.tickGenerator) {
+ throw new Error("Time mode requires the flot.time plugin.");
+ }
+
+        // Flot supports base-10 axes; any other mode is handled by a plug-in,
+ // like flot.time.js.
+
+ if (!axis.tickGenerator) {
+
+ axis.tickGenerator = function (axis) {
+
+ var ticks = [],
+ start = floorInBase(axis.min, axis.tickSize),
+ i = 0,
+ v = Number.NaN,
+ prev;
+
+ do {
+ prev = v;
+ v = start + i * axis.tickSize;
+ ticks.push(v);
+ ++i;
+ } while (v < axis.max && v != prev);
+ return ticks;
+ };
+
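+            // e.g. with tickDecimals 2, a value of 1.5 formats as "1.50"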
+ axis.tickFormatter = function (value, axis) {
+
+ var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1;
+ var formatted = "" + Math.round(value * factor) / factor;
+
+ // If tickDecimals was specified, ensure that we have exactly that
+ // much precision; otherwise default to the value's own precision.
+
+ if (axis.tickDecimals != null) {
+ var decimal = formatted.indexOf(".");
+ var precision = decimal == -1 ? 0 : formatted.length - decimal - 1;
+ if (precision < axis.tickDecimals) {
+ return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision);
+ }
+ }
+
+ return formatted;
+ };
+ }
+
+ if ($.isFunction(opts.tickFormatter))
+ axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); };
+
+ if (opts.alignTicksWithAxis != null) {
+ var otherAxis = (axis.direction == "x" ? xaxes : yaxes)[opts.alignTicksWithAxis - 1];
+ if (otherAxis && otherAxis.used && otherAxis != axis) {
+ // consider snapping min/max to outermost nice ticks
+ var niceTicks = axis.tickGenerator(axis);
+ if (niceTicks.length > 0) {
+ if (opts.min == null)
+ axis.min = Math.min(axis.min, niceTicks[0]);
+ if (opts.max == null && niceTicks.length > 1)
+ axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]);
+ }
+
+ axis.tickGenerator = function (axis) {
+ // copy ticks, scaled to this axis
+ var ticks = [], v, i;
+ for (i = 0; i < otherAxis.ticks.length; ++i) {
+ v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min);
+ v = axis.min + v * (axis.max - axis.min);
+ ticks.push(v);
+ }
+ return ticks;
+ };
+
+ // we might need an extra decimal since forced
+ // ticks don't necessarily fit naturally
+ if (!axis.mode && opts.tickDecimals == null) {
+ var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1),
+ ts = axis.tickGenerator(axis);
+
+ // only proceed if the tick interval rounded
+ // with an extra decimal doesn't give us a
+                    // trailing zero
+ if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec))))
+ axis.tickDecimals = extraDec;
+ }
+ }
+ }
+ }
+
+ function setTicks(axis) {
+ var oticks = axis.options.ticks, ticks = [];
+ if (oticks == null || (typeof oticks == "number" && oticks > 0))
+ ticks = axis.tickGenerator(axis);
+ else if (oticks) {
+ if ($.isFunction(oticks))
+ // generate the ticks
+ ticks = oticks(axis);
+ else
+ ticks = oticks;
+ }
+
+ // clean up/labelify the supplied ticks, copy them over
+ var i, v;
+ axis.ticks = [];
+ for (i = 0; i < ticks.length; ++i) {
+ var label = null;
+ var t = ticks[i];
+ if (typeof t == "object") {
+ v = +t[0];
+ if (t.length > 1)
+ label = t[1];
+ }
+ else
+ v = +t;
+ if (label == null)
+ label = axis.tickFormatter(v, axis);
+ if (!isNaN(v))
+ axis.ticks.push({ v: v, label: label });
+ }
+ }
+
+ function snapRangeToTicks(axis, ticks) {
+ if (axis.options.autoscaleMargin && ticks.length > 0) {
+ // snap to ticks
+ if (axis.options.min == null)
+ axis.min = Math.min(axis.min, ticks[0].v);
+ if (axis.options.max == null && ticks.length > 1)
+ axis.max = Math.max(axis.max, ticks[ticks.length - 1].v);
+ }
+ }
+
+ function draw() {
+
+ surface.clear();
+
+ executeHooks(hooks.drawBackground, [ctx]);
+
+ var grid = options.grid;
+
+ // draw background, if any
+ if (grid.show && grid.backgroundColor)
+ drawBackground();
+
+ if (grid.show && !grid.aboveData) {
+ drawGrid();
+ }
+
+ for (var i = 0; i < series.length; ++i) {
+ executeHooks(hooks.drawSeries, [ctx, series[i]]);
+ drawSeries(series[i]);
+ }
+
+ executeHooks(hooks.draw, [ctx]);
+
+ if (grid.show && grid.aboveData) {
+ drawGrid();
+ }
+
+ surface.render();
+
+ // A draw implies that either the axes or data have changed, so we
+ // should probably update the overlay highlights as well.
+
+ triggerRedrawOverlay();
+ }
+
+ function extractRange(ranges, coord) {
+ var axis, from, to, key, axes = allAxes();
+
+ for (var i = 0; i < axes.length; ++i) {
+ axis = axes[i];
+ if (axis.direction == coord) {
+ key = coord + axis.n + "axis";
+ if (!ranges[key] && axis.n == 1)
+ key = coord + "axis"; // support x1axis as xaxis
+ if (ranges[key]) {
+ from = ranges[key].from;
+ to = ranges[key].to;
+ break;
+ }
+ }
+ }
+
+ // backwards-compat stuff - to be removed in future
+ if (!ranges[key]) {
+ axis = coord == "x" ? xaxes[0] : yaxes[0];
+ from = ranges[coord + "1"];
+ to = ranges[coord + "2"];
+ }
+
+ // auto-reverse as an added bonus
+ if (from != null && to != null && from > to) {
+ var tmp = from;
+ from = to;
+ to = tmp;
+ }
+
+ return { from: from, to: to, axis: axis };
+ }
+
+ function drawBackground() {
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)");
+ ctx.fillRect(0, 0, plotWidth, plotHeight);
+ ctx.restore();
+ }
+
+ function drawGrid() {
+ var i, axes, bw, bc;
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ // draw markings
+ var markings = options.grid.markings;
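+        // (e.g. markings: [ { xaxis: { from: 0, to: 2 }, color: "#e8e8e8" } ],
+        // or a function that receives the axes and returns such an array)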
+ if (markings) {
+ if ($.isFunction(markings)) {
+ axes = plot.getAxes();
+                // xmin etc. are for backwards compatibility, to be
+ // removed in the future
+ axes.xmin = axes.xaxis.min;
+ axes.xmax = axes.xaxis.max;
+ axes.ymin = axes.yaxis.min;
+ axes.ymax = axes.yaxis.max;
+
+ markings = markings(axes);
+ }
+
+ for (i = 0; i < markings.length; ++i) {
+ var m = markings[i],
+ xrange = extractRange(m, "x"),
+ yrange = extractRange(m, "y");
+
+ // fill in missing
+ if (xrange.from == null)
+ xrange.from = xrange.axis.min;
+ if (xrange.to == null)
+ xrange.to = xrange.axis.max;
+ if (yrange.from == null)
+ yrange.from = yrange.axis.min;
+ if (yrange.to == null)
+ yrange.to = yrange.axis.max;
+
+ // clip
+ if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max ||
+ yrange.to < yrange.axis.min || yrange.from > yrange.axis.max)
+ continue;
+
+ xrange.from = Math.max(xrange.from, xrange.axis.min);
+ xrange.to = Math.min(xrange.to, xrange.axis.max);
+ yrange.from = Math.max(yrange.from, yrange.axis.min);
+ yrange.to = Math.min(yrange.to, yrange.axis.max);
+
+ var xequal = xrange.from === xrange.to,
+ yequal = yrange.from === yrange.to;
+
+ if (xequal && yequal) {
+ continue;
+ }
+
+ // then draw
+ xrange.from = Math.floor(xrange.axis.p2c(xrange.from));
+ xrange.to = Math.floor(xrange.axis.p2c(xrange.to));
+ yrange.from = Math.floor(yrange.axis.p2c(yrange.from));
+ yrange.to = Math.floor(yrange.axis.p2c(yrange.to));
+
+ if (xequal || yequal) {
+ var lineWidth = m.lineWidth || options.grid.markingsLineWidth,
+ subPixel = lineWidth % 2 ? 0.5 : 0;
+ ctx.beginPath();
+ ctx.strokeStyle = m.color || options.grid.markingsColor;
+ ctx.lineWidth = lineWidth;
+ if (xequal) {
+ ctx.moveTo(xrange.to + subPixel, yrange.from);
+ ctx.lineTo(xrange.to + subPixel, yrange.to);
+ } else {
+ ctx.moveTo(xrange.from, yrange.to + subPixel);
+ ctx.lineTo(xrange.to, yrange.to + subPixel);
+ }
+ ctx.stroke();
+ } else {
+ ctx.fillStyle = m.color || options.grid.markingsColor;
+ ctx.fillRect(xrange.from, yrange.to,
+ xrange.to - xrange.from,
+ yrange.from - yrange.to);
+ }
+ }
+ }
+
+ // draw the ticks
+ axes = allAxes();
+ bw = options.grid.borderWidth;
+
+ for (var j = 0; j < axes.length; ++j) {
+ var axis = axes[j], box = axis.box,
+ t = axis.tickLength, x, y, xoff, yoff;
+ if (!axis.show || axis.ticks.length == 0)
+ continue;
+
+ ctx.lineWidth = 1;
+
+ // find the edges
+ if (axis.direction == "x") {
+ x = 0;
+ if (t == "full")
+ y = (axis.position == "top" ? 0 : plotHeight);
+ else
+ y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0);
+ }
+ else {
+ y = 0;
+ if (t == "full")
+ x = (axis.position == "left" ? 0 : plotWidth);
+ else
+ x = box.left - plotOffset.left + (axis.position == "left" ? box.width : 0);
+ }
+
+ // draw tick bar
+ if (!axis.innermost) {
+ ctx.strokeStyle = axis.options.color;
+ ctx.beginPath();
+ xoff = yoff = 0;
+ if (axis.direction == "x")
+ xoff = plotWidth + 1;
+ else
+ yoff = plotHeight + 1;
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x") {
+ y = Math.floor(y) + 0.5;
+ } else {
+ x = Math.floor(x) + 0.5;
+ }
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ ctx.stroke();
+ }
+
+ // draw ticks
+
+ ctx.strokeStyle = axis.options.tickColor;
+
+ ctx.beginPath();
+ for (i = 0; i < axis.ticks.length; ++i) {
+ var v = axis.ticks[i].v;
+
+ xoff = yoff = 0;
+
+ if (isNaN(v) || v < axis.min || v > axis.max
+ // skip those lying on the axes if we got a border
+ || (t == "full"
+ && ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0)
+ && (v == axis.min || v == axis.max)))
+ continue;
+
+ if (axis.direction == "x") {
+ x = axis.p2c(v);
+ yoff = t == "full" ? -plotHeight : t;
+
+ if (axis.position == "top")
+ yoff = -yoff;
+ }
+ else {
+ y = axis.p2c(v);
+ xoff = t == "full" ? -plotWidth : t;
+
+ if (axis.position == "left")
+ xoff = -xoff;
+ }
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x")
+ x = Math.floor(x) + 0.5;
+ else
+ y = Math.floor(y) + 0.5;
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ }
+
+ ctx.stroke();
+ }
+
+
+ // draw border
+ if (bw) {
+ // If either borderWidth or borderColor is an object, then draw the border
+ // line by line instead of as one rectangle
+ bc = options.grid.borderColor;
+ if(typeof bw == "object" || typeof bc == "object") {
+ if (typeof bw !== "object") {
+ bw = {top: bw, right: bw, bottom: bw, left: bw};
+ }
+ if (typeof bc !== "object") {
+ bc = {top: bc, right: bc, bottom: bc, left: bc};
+ }
+
+ if (bw.top > 0) {
+ ctx.strokeStyle = bc.top;
+ ctx.lineWidth = bw.top;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left, 0 - bw.top/2);
+ ctx.lineTo(plotWidth, 0 - bw.top/2);
+ ctx.stroke();
+ }
+
+ if (bw.right > 0) {
+ ctx.strokeStyle = bc.right;
+ ctx.lineWidth = bw.right;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top);
+ ctx.lineTo(plotWidth + bw.right / 2, plotHeight);
+ ctx.stroke();
+ }
+
+ if (bw.bottom > 0) {
+ ctx.strokeStyle = bc.bottom;
+ ctx.lineWidth = bw.bottom;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2);
+ ctx.lineTo(0, plotHeight + bw.bottom / 2);
+ ctx.stroke();
+ }
+
+ if (bw.left > 0) {
+ ctx.strokeStyle = bc.left;
+ ctx.lineWidth = bw.left;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom);
+                    ctx.lineTo(0 - bw.left/2, 0);
+ ctx.stroke();
+ }
+ }
+ else {
+ ctx.lineWidth = bw;
+ ctx.strokeStyle = options.grid.borderColor;
+ ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw);
+ }
+ }
+
+ ctx.restore();
+ }
+
+ function drawAxisLabels() {
+
+ $.each(allAxes(), function (_, axis) {
+ var box = axis.box,
+ legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
+ layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
+ font = axis.options.font || "flot-tick-label tickLabel",
+ tick, x, y, halign, valign;
+
+ // Remove text before checking for axis.show and ticks.length;
+ // otherwise plugins, like flot-tickrotor, that draw their own
+ // tick labels will end up with both theirs and the defaults.
+
+ surface.removeText(layer);
+
+ if (!axis.show || axis.ticks.length == 0)
+ return;
+
+ for (var i = 0; i < axis.ticks.length; ++i) {
+
+ tick = axis.ticks[i];
+ if (!tick.label || tick.v < axis.min || tick.v > axis.max)
+ continue;
+
+ if (axis.direction == "x") {
+ halign = "center";
+ x = plotOffset.left + axis.p2c(tick.v);
+ if (axis.position == "bottom") {
+ y = box.top + box.padding;
+ } else {
+ y = box.top + box.height - box.padding;
+ valign = "bottom";
+ }
+ } else {
+ valign = "middle";
+ y = plotOffset.top + axis.p2c(tick.v);
+ if (axis.position == "left") {
+ x = box.left + box.width - box.padding;
+ halign = "right";
+ } else {
+ x = box.left + box.padding;
+ }
+ }
+
+ surface.addText(layer, x, y, tick.label, font, null, null, halign, valign);
+ }
+ });
+ }
+
+ function drawSeries(series) {
+ if (series.lines.show)
+ drawSeriesLines(series);
+ if (series.bars.show)
+ drawSeriesBars(series);
+ if (series.points.show)
+ drawSeriesPoints(series);
+ }
+
+ function drawSeriesLines(series) {
+ function plotLine(datapoints, xoffset, yoffset, axisx, axisy) {
+ var points = datapoints.points,
+ ps = datapoints.pointsize,
+ prevx = null, prevy = null;
+
+ ctx.beginPath();
+ for (var i = ps; i < points.length; i += ps) {
+ var x1 = points[i - ps], y1 = points[i - ps + 1],
+ x2 = points[i], y2 = points[i + 1];
+
+ if (x1 == null || x2 == null)
+ continue;
+
+ // clip with ymin
+ if (y1 <= y2 && y1 < axisy.min) {
+ if (y2 < axisy.min)
+ continue; // line segment is outside
+ // compute new intersection point
+ x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.min;
+ }
+ else if (y2 <= y1 && y2 < axisy.min) {
+ if (y1 < axisy.min)
+ continue;
+ x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.min;
+ }
+
+ // clip with ymax
+ if (y1 >= y2 && y1 > axisy.max) {
+ if (y2 > axisy.max)
+ continue;
+ x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.max;
+ }
+ else if (y2 >= y1 && y2 > axisy.max) {
+ if (y1 > axisy.max)
+ continue;
+ x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.max;
+ }
+
+ // clip with xmin
+ if (x1 <= x2 && x1 < axisx.min) {
+ if (x2 < axisx.min)
+ continue;
+ y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.min;
+ }
+ else if (x2 <= x1 && x2 < axisx.min) {
+ if (x1 < axisx.min)
+ continue;
+ y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.min;
+ }
+
+ // clip with xmax
+ if (x1 >= x2 && x1 > axisx.max) {
+ if (x2 > axisx.max)
+ continue;
+ y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.max;
+ }
+ else if (x2 >= x1 && x2 > axisx.max) {
+ if (x1 > axisx.max)
+ continue;
+ y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.max;
+ }
+
+ if (x1 != prevx || y1 != prevy)
+ ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset);
+
+ prevx = x2;
+ prevy = y2;
+ ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset);
+ }
+ ctx.stroke();
+ }
+
+ function plotLineArea(datapoints, axisx, axisy) {
+ var points = datapoints.points,
+ ps = datapoints.pointsize,
+ bottom = Math.min(Math.max(0, axisy.min), axisy.max),
+ i = 0, top, areaOpen = false,
+ ypos = 1, segmentStart = 0, segmentEnd = 0;
+
+            // we process each segment in two passes: first in the forward
+            // direction to sketch out the top, then once we hit the
+ // end we go backwards to sketch the bottom
+ while (true) {
+ if (ps > 0 && i > points.length + ps)
+ break;
+
+ i += ps; // ps is negative if going backwards
+
+ var x1 = points[i - ps],
+ y1 = points[i - ps + ypos],
+ x2 = points[i], y2 = points[i + ypos];
+
+ if (areaOpen) {
+ if (ps > 0 && x1 != null && x2 == null) {
+ // at turning point
+ segmentEnd = i;
+ ps = -ps;
+ ypos = 2;
+ continue;
+ }
+
+ if (ps < 0 && i == segmentStart + ps) {
+ // done with the reverse sweep
+ ctx.fill();
+ areaOpen = false;
+ ps = -ps;
+ ypos = 1;
+ i = segmentStart = segmentEnd + ps;
+ continue;
+ }
+ }
+
+ if (x1 == null || x2 == null)
+ continue;
+
+ // clip x values
+
+ // clip with xmin
+ if (x1 <= x2 && x1 < axisx.min) {
+ if (x2 < axisx.min)
+ continue;
+ y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.min;
+ }
+ else if (x2 <= x1 && x2 < axisx.min) {
+ if (x1 < axisx.min)
+ continue;
+ y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.min;
+ }
+
+ // clip with xmax
+ if (x1 >= x2 && x1 > axisx.max) {
+ if (x2 > axisx.max)
+ continue;
+ y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.max;
+ }
+ else if (x2 >= x1 && x2 > axisx.max) {
+ if (x1 > axisx.max)
+ continue;
+ y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.max;
+ }
+
+ if (!areaOpen) {
+ // open area
+ ctx.beginPath();
+ ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom));
+ areaOpen = true;
+ }
+
+                // now first check the case where both are outside
+ if (y1 >= axisy.max && y2 >= axisy.max) {
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max));
+ continue;
+ }
+ else if (y1 <= axisy.min && y2 <= axisy.min) {
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min));
+ continue;
+ }
+
+                // otherwise it's a bit more complicated: there might
+                // be a flat maxed-out rectangle first, then a
+                // triangular cutout or the reverse; to find these,
+ // keep track of the current x values
+ var x1old = x1, x2old = x2;
+
+                // clip the y values; rather than shortcutting, we
+                // go through all the cases in turn
+
+ // clip with ymin
+ if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) {
+ x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.min;
+ }
+ else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) {
+ x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.min;
+ }
+
+ // clip with ymax
+ if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) {
+ x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.max;
+ }
+ else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) {
+ x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.max;
+ }
+
+ // if the x value was changed we got a rectangle
+ // to fill
+ if (x1 != x1old) {
+ ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1));
+ // it goes to (x1, y1), but we fill that below
+ }
+
+                // fill the triangular section; this sometimes results
+                // in redundant points if (x1, y1) hasn't changed
+                // since the previous lineTo, but we just ignore that
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
+
+ // fill the other rectangle if it's there
+ if (x2 != x2old) {
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
+ ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2));
+ }
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+ ctx.lineJoin = "round";
+
+ var lw = series.lines.lineWidth,
+ sw = series.shadowSize;
+ // FIXME: consider another form of shadow when filling is turned on
+ if (lw > 0 && sw > 0) {
+ // draw shadow as a thick and thin line with transparency
+ ctx.lineWidth = sw;
+ ctx.strokeStyle = "rgba(0,0,0,0.1)";
+ // position shadow at angle from the mid of line
+ var angle = Math.PI/18;
+ plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis);
+ ctx.lineWidth = sw/2;
+ plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis);
+ }
+
+ ctx.lineWidth = lw;
+ ctx.strokeStyle = series.color;
+ var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight);
+ if (fillStyle) {
+ ctx.fillStyle = fillStyle;
+ plotLineArea(series.datapoints, series.xaxis, series.yaxis);
+ }
+
+ if (lw > 0)
+ plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis);
+ ctx.restore();
+ }
+
+ function drawSeriesPoints(series) {
+ function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) {
+ var points = datapoints.points, ps = datapoints.pointsize;
+
+ for (var i = 0; i < points.length; i += ps) {
+ var x = points[i], y = points[i + 1];
+ if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
+ continue;
+
+ ctx.beginPath();
+ x = axisx.p2c(x);
+ y = axisy.p2c(y) + offset;
+ if (symbol == "circle")
+ ctx.arc(x, y, radius, 0, shadow ? Math.PI : Math.PI * 2, false);
+ else
+ symbol(ctx, x, y, radius, shadow);
+ ctx.closePath();
+
+ if (fillStyle) {
+ ctx.fillStyle = fillStyle;
+ ctx.fill();
+ }
+ ctx.stroke();
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ var lw = series.points.lineWidth,
+ sw = series.shadowSize,
+ radius = series.points.radius,
+ symbol = series.points.symbol;
+
+ // If the user sets the line width to 0, we change it to a very
+ // small value. A line width of 0 seems to force the default of 1.
+ // Doing the conditional here allows the shadow setting to still be
+ // optional even with a lineWidth of 0.
+
+ if( lw == 0 )
+ lw = 0.0001;
+
+ if (lw > 0 && sw > 0) {
+ // draw shadow in two steps
+ var w = sw / 2;
+ ctx.lineWidth = w;
+ ctx.strokeStyle = "rgba(0,0,0,0.1)";
+ plotPoints(series.datapoints, radius, null, w + w/2, true,
+ series.xaxis, series.yaxis, symbol);
+
+ ctx.strokeStyle = "rgba(0,0,0,0.2)";
+ plotPoints(series.datapoints, radius, null, w/2, true,
+ series.xaxis, series.yaxis, symbol);
+ }
+
+ ctx.lineWidth = lw;
+ ctx.strokeStyle = series.color;
+ plotPoints(series.datapoints, radius,
+ getFillStyle(series.points, series.color), 0, false,
+ series.xaxis, series.yaxis, symbol);
+ ctx.restore();
+ }
+
+ function drawBar(x, y, b, barLeft, barRight, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) {
+ var left, right, bottom, top,
+ drawLeft, drawRight, drawTop, drawBottom,
+ tmp;
+
+ // in horizontal mode, we start the bar from the left
+ // instead of from the bottom so it appears to be
+ // horizontal rather than vertical
+ if (horizontal) {
+ drawBottom = drawRight = drawTop = true;
+ drawLeft = false;
+ left = b;
+ right = x;
+ top = y + barLeft;
+ bottom = y + barRight;
+
+ // account for negative bars
+ if (right < left) {
+ tmp = right;
+ right = left;
+ left = tmp;
+ drawLeft = true;
+ drawRight = false;
+ }
+ }
+ else {
+ drawLeft = drawRight = drawTop = true;
+ drawBottom = false;
+ left = x + barLeft;
+ right = x + barRight;
+ bottom = b;
+ top = y;
+
+ // account for negative bars
+ if (top < bottom) {
+ tmp = top;
+ top = bottom;
+ bottom = tmp;
+ drawBottom = true;
+ drawTop = false;
+ }
+ }
+
+ // clip
+ if (right < axisx.min || left > axisx.max ||
+ top < axisy.min || bottom > axisy.max)
+ return;
+
+ if (left < axisx.min) {
+ left = axisx.min;
+ drawLeft = false;
+ }
+
+ if (right > axisx.max) {
+ right = axisx.max;
+ drawRight = false;
+ }
+
+ if (bottom < axisy.min) {
+ bottom = axisy.min;
+ drawBottom = false;
+ }
+
+ if (top > axisy.max) {
+ top = axisy.max;
+ drawTop = false;
+ }
+
+ left = axisx.p2c(left);
+ bottom = axisy.p2c(bottom);
+ right = axisx.p2c(right);
+ top = axisy.p2c(top);
+
+ // fill the bar
+ if (fillStyleCallback) {
+ c.fillStyle = fillStyleCallback(bottom, top);
+            c.fillRect(left, top, right - left, bottom - top);
+ }
+
+ // draw outline
+ if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) {
+ c.beginPath();
+
+ // FIXME: inline moveTo is buggy with excanvas
+ c.moveTo(left, bottom);
+ if (drawLeft)
+ c.lineTo(left, top);
+ else
+ c.moveTo(left, top);
+ if (drawTop)
+ c.lineTo(right, top);
+ else
+ c.moveTo(right, top);
+ if (drawRight)
+ c.lineTo(right, bottom);
+ else
+ c.moveTo(right, bottom);
+ if (drawBottom)
+ c.lineTo(left, bottom);
+ else
+ c.moveTo(left, bottom);
+ c.stroke();
+ }
+ }
+
+ function drawSeriesBars(series) {
+ function plotBars(datapoints, barLeft, barRight, fillStyleCallback, axisx, axisy) {
+ var points = datapoints.points, ps = datapoints.pointsize;
+
+ for (var i = 0; i < points.length; i += ps) {
+ if (points[i] == null)
+ continue;
+ drawBar(points[i], points[i + 1], points[i + 2], barLeft, barRight, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth);
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ // FIXME: figure out a way to add shadows (for instance along the right edge)
+ ctx.lineWidth = series.bars.lineWidth;
+ ctx.strokeStyle = series.color;
+
+ var barLeft;
+
+ switch (series.bars.align) {
+ case "left":
+ barLeft = 0;
+ break;
+ case "right":
+ barLeft = -series.bars.barWidth;
+ break;
+ default:
+ barLeft = -series.bars.barWidth / 2;
+ }
+
+ var fillStyleCallback = series.bars.fill ? function (bottom, top) { return getFillStyle(series.bars, series.color, bottom, top); } : null;
+ plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, fillStyleCallback, series.xaxis, series.yaxis);
+ ctx.restore();
+ }
+
+ function getFillStyle(filloptions, seriesColor, bottom, top) {
+ var fill = filloptions.fill;
+ if (!fill)
+ return null;
+
+ if (filloptions.fillColor)
+ return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor);
+
+ var c = $.color.parse(seriesColor);
+ c.a = typeof fill == "number" ? fill : 0.4;
+ c.normalize();
+ return c.toString();
+ }
+
+ function insertLegend() {
+
+ if (options.legend.container != null) {
+ $(options.legend.container).html("");
+ } else {
+ placeholder.find(".legend").remove();
+ }
+
+ if (!options.legend.show) {
+ return;
+ }
+
+ var fragments = [], entries = [], rowStarted = false,
+ lf = options.legend.labelFormatter, s, label;
+
+ // Build a list of legend entries, with each having a label and a color
+
+ for (var i = 0; i < series.length; ++i) {
+ s = series[i];
+ if (s.label) {
+ label = lf ? lf(s.label, s) : s.label;
+ if (label) {
+ entries.push({
+ label: label,
+ color: s.color
+ });
+ }
+ }
+ }
+
+ // Sort the legend using either the default or a custom comparator
+
+ if (options.legend.sorted) {
+ if ($.isFunction(options.legend.sorted)) {
+ entries.sort(options.legend.sorted);
+ } else if (options.legend.sorted == "reverse") {
+ entries.reverse();
+ } else {
+ var ascending = options.legend.sorted != "descending";
+ entries.sort(function(a, b) {
+ return a.label == b.label ? 0 : (
+ (a.label < b.label) != ascending ? 1 : -1 // Logical XOR
+ );
+ });
+ }
+ }
+
+ // Generate markup for the list of entries, in their final order
+
+ for (var i = 0; i < entries.length; ++i) {
+
+ var entry = entries[i];
+
+ if (i % options.legend.noColumns == 0) {
+ if (rowStarted)
+ fragments.push('</tr>');
+ fragments.push('<tr>');
+ rowStarted = true;
+ }
+
+ fragments.push(
+ '<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' +
+ '<td class="legendLabel">' + entry.label + '</td>'
+ );
+ }
+
+ if (rowStarted)
+ fragments.push('</tr>');
+
+ if (fragments.length == 0)
+ return;
+
+ var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>';
+ if (options.legend.container != null)
+ $(options.legend.container).html(table);
+ else {
+ var pos = "",
+ p = options.legend.position,
+ m = options.legend.margin;
+ if (m[0] == null)
+ m = [m, m];
+ if (p.charAt(0) == "n")
+ pos += 'top:' + (m[1] + plotOffset.top) + 'px;';
+ else if (p.charAt(0) == "s")
+ pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;';
+ if (p.charAt(1) == "e")
+ pos += 'right:' + (m[0] + plotOffset.right) + 'px;';
+ else if (p.charAt(1) == "w")
+ pos += 'left:' + (m[0] + plotOffset.left) + 'px;';
+ var legend = $('<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>').appendTo(placeholder);
+ if (options.legend.backgroundOpacity != 0.0) {
+ // put in the transparent background
+ // separately to avoid blended labels and
+ // label boxes
+ var c = options.legend.backgroundColor;
+ if (c == null) {
+ c = options.grid.backgroundColor;
+ if (c && typeof c == "string")
+ c = $.color.parse(c);
+ else
+ c = $.color.extract(legend, 'background-color');
+ c.a = 1;
+ c = c.toString();
+ }
+ var div = legend.children();
+ $('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>').prependTo(legend).css('opacity', options.legend.backgroundOpacity);
+ }
+ }
+ }
+
+
+ // interactive features
+
+ var highlights = [],
+ redrawTimeout = null;
+
+ // returns the data item the mouse is over, or null if none is found
+ function findNearbyItem(mouseX, mouseY, seriesFilter) {
+ var maxDistance = options.grid.mouseActiveRadius,
+ smallestDistance = maxDistance * maxDistance + 1,
+ item = null, foundPoint = false, i, j, ps;
+
+ for (i = series.length - 1; i >= 0; --i) {
+ if (!seriesFilter(series[i]))
+ continue;
+
+ var s = series[i],
+ axisx = s.xaxis,
+ axisy = s.yaxis,
+ points = s.datapoints.points,
+ mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster
+ my = axisy.c2p(mouseY),
+ maxx = maxDistance / axisx.scale,
+ maxy = maxDistance / axisy.scale;
+
+ ps = s.datapoints.pointsize;
+ // with inverse transforms, we can't use the maxx/maxy
+ // optimization, sadly
+ if (axisx.options.inverseTransform)
+ maxx = Number.MAX_VALUE;
+ if (axisy.options.inverseTransform)
+ maxy = Number.MAX_VALUE;
+
+ if (s.lines.show || s.points.show) {
+ for (j = 0; j < points.length; j += ps) {
+ var x = points[j], y = points[j + 1];
+ if (x == null)
+ continue;
+
+ // For points and lines, the cursor must be within a
+ // certain distance to the data point
+ if (x - mx > maxx || x - mx < -maxx ||
+ y - my > maxy || y - my < -maxy)
+ continue;
+
+ // We have to calculate distances in pixels, not in
+ // data units, because the scales of the axes may be different
+ var dx = Math.abs(axisx.p2c(x) - mouseX),
+ dy = Math.abs(axisy.p2c(y) - mouseY),
+ dist = dx * dx + dy * dy; // we save the sqrt
+
+ // use <= to ensure last point takes precedence
+ // (last generally means on top of)
+ if (dist < smallestDistance) {
+ smallestDistance = dist;
+ item = [i, j / ps];
+ }
+ }
+ }
+
+ if (s.bars.show && !item) { // no other point can be nearby
+
+ var barLeft, barRight;
+
+ switch (s.bars.align) {
+ case "left":
+ barLeft = 0;
+ break;
+ case "right":
+ barLeft = -s.bars.barWidth;
+ break;
+ default:
+ barLeft = -s.bars.barWidth / 2;
+ }
+
+ barRight = barLeft + s.bars.barWidth;
+
+ for (j = 0; j < points.length; j += ps) {
+ var x = points[j], y = points[j + 1], b = points[j + 2];
+ if (x == null)
+ continue;
+
+ // for a bar graph, the cursor must be inside the bar
+ if (series[i].bars.horizontal ?
+ (mx <= Math.max(b, x) && mx >= Math.min(b, x) &&
+ my >= y + barLeft && my <= y + barRight) :
+ (mx >= x + barLeft && mx <= x + barRight &&
+ my >= Math.min(b, y) && my <= Math.max(b, y)))
+ item = [i, j / ps];
+ }
+ }
+ }
+
+ if (item) {
+ i = item[0];
+ j = item[1];
+ ps = series[i].datapoints.pointsize;
+
+ return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps),
+ dataIndex: j,
+ series: series[i],
+ seriesIndex: i };
+ }
+
+ return null;
+ }
+
+ function onMouseMove(e) {
+ if (options.grid.hoverable)
+ triggerClickHoverEvent("plothover", e,
+ function (s) { return s["hoverable"] != false; });
+ }
+
+ function onMouseLeave(e) {
+ if (options.grid.hoverable)
+ triggerClickHoverEvent("plothover", e,
+ function (s) { return false; });
+ }
+
+ function onClick(e) {
+ triggerClickHoverEvent("plotclick", e,
+ function (s) { return s["clickable"] != false; });
+ }
+
+ // trigger click or hover event (they send the same parameters
+ // so we share their code)
+ function triggerClickHoverEvent(eventname, event, seriesFilter) {
+ var offset = eventHolder.offset(),
+ canvasX = event.pageX - offset.left - plotOffset.left,
+ canvasY = event.pageY - offset.top - plotOffset.top,
+ pos = canvasToAxisCoords({ left: canvasX, top: canvasY });
+
+ pos.pageX = event.pageX;
+ pos.pageY = event.pageY;
+
+ var item = findNearbyItem(canvasX, canvasY, seriesFilter);
+
+ if (item) {
+ // fill in mouse pos for any listeners out there
+ item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10);
+ item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10);
+ }
+
+ if (options.grid.autoHighlight) {
+ // clear auto-highlights
+ for (var i = 0; i < highlights.length; ++i) {
+ var h = highlights[i];
+ if (h.auto == eventname &&
+ !(item && h.series == item.series &&
+ h.point[0] == item.datapoint[0] &&
+ h.point[1] == item.datapoint[1]))
+ unhighlight(h.series, h.point);
+ }
+
+ if (item)
+ highlight(item.series, item.datapoint, eventname);
+ }
+
+ placeholder.trigger(eventname, [ pos, item ]);
+ }
+
+ function triggerRedrawOverlay() {
+ var t = options.interaction.redrawOverlayInterval;
+ if (t == -1) { // skip event queue
+ drawOverlay();
+ return;
+ }
+
+ if (!redrawTimeout)
+ redrawTimeout = setTimeout(drawOverlay, t);
+ }
+
+ function drawOverlay() {
+ redrawTimeout = null;
+
+ // draw highlights
+ octx.save();
+ overlay.clear();
+ octx.translate(plotOffset.left, plotOffset.top);
+
+ var i, hi;
+ for (i = 0; i < highlights.length; ++i) {
+ hi = highlights[i];
+
+ if (hi.series.bars.show)
+ drawBarHighlight(hi.series, hi.point);
+ else
+ drawPointHighlight(hi.series, hi.point);
+ }
+ octx.restore();
+
+ executeHooks(hooks.drawOverlay, [octx]);
+ }
+
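+    // e.g. highlight(0, 2) highlights the third point of the first series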
+ function highlight(s, point, auto) {
+ if (typeof s == "number")
+ s = series[s];
+
+ if (typeof point == "number") {
+ var ps = s.datapoints.pointsize;
+ point = s.datapoints.points.slice(ps * point, ps * (point + 1));
+ }
+
+ var i = indexOfHighlight(s, point);
+ if (i == -1) {
+ highlights.push({ series: s, point: point, auto: auto });
+
+ triggerRedrawOverlay();
+ }
+ else if (!auto)
+ highlights[i].auto = false;
+ }
+
+ function unhighlight(s, point) {
+ if (s == null && point == null) {
+ highlights = [];
+ triggerRedrawOverlay();
+ return;
+ }
+
+ if (typeof s == "number")
+ s = series[s];
+
+ if (typeof point == "number") {
+ var ps = s.datapoints.pointsize;
+ point = s.datapoints.points.slice(ps * point, ps * (point + 1));
+ }
+
+ var i = indexOfHighlight(s, point);
+ if (i != -1) {
+ highlights.splice(i, 1);
+
+ triggerRedrawOverlay();
+ }
+ }
+
+ function indexOfHighlight(s, p) {
+ for (var i = 0; i < highlights.length; ++i) {
+ var h = highlights[i];
+ if (h.series == s && h.point[0] == p[0]
+ && h.point[1] == p[1])
+ return i;
+ }
+ return -1;
+ }
+
+ function drawPointHighlight(series, point) {
+ var x = point[0], y = point[1],
+ axisx = series.xaxis, axisy = series.yaxis,
+ highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString();
+
+ if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
+ return;
+
+ var pointRadius = series.points.radius + series.points.lineWidth / 2;
+ octx.lineWidth = pointRadius;
+ octx.strokeStyle = highlightColor;
+ var radius = 1.5 * pointRadius;
+ x = axisx.p2c(x);
+ y = axisy.p2c(y);
+
+ octx.beginPath();
+ if (series.points.symbol == "circle")
+ octx.arc(x, y, radius, 0, 2 * Math.PI, false);
+ else
+ series.points.symbol(octx, x, y, radius, false);
+ octx.closePath();
+ octx.stroke();
+ }
+
+ function drawBarHighlight(series, point) {
+ var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(),
+ fillStyle = highlightColor,
+ barLeft;
+
+ switch (series.bars.align) {
+ case "left":
+ barLeft = 0;
+ break;
+ case "right":
+ barLeft = -series.bars.barWidth;
+ break;
+ default:
+ barLeft = -series.bars.barWidth / 2;
+ }
+
+ octx.lineWidth = series.bars.lineWidth;
+ octx.strokeStyle = highlightColor;
+
+ drawBar(point[0], point[1], point[2] || 0, barLeft, barLeft + series.bars.barWidth,
+ function () { return fillStyle; }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth);
+ }
+
+ function getColorOrGradient(spec, bottom, top, defaultColor) {
+ if (typeof spec == "string")
+ return spec;
+ else {
+ // assume this is a gradient spec; IE currently only
+ // supports a simple vertical gradient properly, so that's
+ // what we support too
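+            // (e.g. { colors: [ { opacity: 0.8 }, { brightness: 0.6 } ] }
+            // produces a fade built from variations on defaultColor)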
+ var gradient = ctx.createLinearGradient(0, top, 0, bottom);
+
+ for (var i = 0, l = spec.colors.length; i < l; ++i) {
+ var c = spec.colors[i];
+ if (typeof c != "string") {
+ var co = $.color.parse(defaultColor);
+ if (c.brightness != null)
+ co = co.scale('rgb', c.brightness);
+ if (c.opacity != null)
+ co.a *= c.opacity;
+ c = co.toString();
+ }
+ gradient.addColorStop(i / (l - 1), c);
+ }
+
+ return gradient;
+ }
+ }
+ }
+
+ // Add the plot function to the top level of the jQuery object
+
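+    // A minimal usage sketch (the element and data below are illustrative):
+    //   $.plot($("#chart"), [ { label: "y = 2x", data: [[0, 0], [1, 2], [2, 4]] } ],
+    //          { yaxis: { min: 0 } });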
+ $.plot = function(placeholder, data, options) {
+ //var t0 = new Date();
+ var plot = new Plot($(placeholder), data, options, $.plot.plugins);
+ //(window.console ? console.log : alert)("time used (msecs): " + ((new Date()).getTime() - t0.getTime()));
+ return plot;
+ };
+
+ $.plot.version = "0.8.3";
+
+ $.plot.plugins = [];
+
+ // Also add the plot function as a chainable property
+
+ $.fn.plot = function(data, options) {
+ return this.each(function() {
+ $.plot(this, data, options);
+ });
+ };
+
+    // round down to the nearest multiple of base
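+    // (e.g. floorInBase(7.3, 2.5) === 5)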
+ function floorInBase(n, base) {
+ return base * Math.floor(n / base);
+ }
+
+})(jQuery);
diff --git a/qa/workunits/erasure-code/jquery.js b/qa/workunits/erasure-code/jquery.js
new file mode 100644
index 000000000..8c24ffc61
--- /dev/null
+++ b/qa/workunits/erasure-code/jquery.js
@@ -0,0 +1,9472 @@
+/*!
+ * jQuery JavaScript Library v1.8.3
+ * http://jquery.com/
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ *
+ * Copyright 2012 jQuery Foundation and other contributors
+ * Released under the MIT license
+ * http://jquery.org/license
+ *
+ * Date: Tue Nov 13 2012 08:20:33 GMT-0500 (Eastern Standard Time)
+ */
+(function( window, undefined ) {
+var
+ // A central reference to the root jQuery(document)
+ rootjQuery,
+
+ // The deferred used on DOM ready
+ readyList,
+
+	// Use the correct document according to the window argument (sandbox)
+ document = window.document,
+ location = window.location,
+ navigator = window.navigator,
+
+ // Map over jQuery in case of overwrite
+ _jQuery = window.jQuery,
+
+ // Map over the $ in case of overwrite
+ _$ = window.$,
+
+ // Save a reference to some core methods
+ core_push = Array.prototype.push,
+ core_slice = Array.prototype.slice,
+ core_indexOf = Array.prototype.indexOf,
+ core_toString = Object.prototype.toString,
+ core_hasOwn = Object.prototype.hasOwnProperty,
+ core_trim = String.prototype.trim,
+
+ // Define a local copy of jQuery
+ jQuery = function( selector, context ) {
+ // The jQuery object is actually just the init constructor 'enhanced'
+ return new jQuery.fn.init( selector, context, rootjQuery );
+ },
+
+ // Used for matching numbers
+ core_pnum = /[\-+]?(?:\d*\.|)\d+(?:[eE][\-+]?\d+|)/.source,
+
+ // Used for detecting and trimming whitespace
+ core_rnotwhite = /\S/,
+ core_rspace = /\s+/,
+
+ // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE)
+ rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ rquickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,
+
+ // Match a standalone tag
+ rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/,
+
+ // JSON RegExp
+ rvalidchars = /^[\],:{}\s]*$/,
+ rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g,
+ rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,
+ rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d\d*\.|)\d+(?:[eE][\-+]?\d+|)/g,
+
+ // Matches dashed string for camelizing
+ rmsPrefix = /^-ms-/,
+ rdashAlpha = /-([\da-z])/gi,
+
+ // Used by jQuery.camelCase as callback to replace()
+ fcamelCase = function( all, letter ) {
+ return ( letter + "" ).toUpperCase();
+ },
+
+ // The ready event handler and self cleanup method
+ DOMContentLoaded = function() {
+ if ( document.addEventListener ) {
+ document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+ jQuery.ready();
+ } else if ( document.readyState === "complete" ) {
+ // we're here because readyState === "complete" in oldIE
+			// which is good enough for us to call the DOM ready handler!
+ document.detachEvent( "onreadystatechange", DOMContentLoaded );
+ jQuery.ready();
+ }
+ },
+
+ // [[Class]] -> type pairs
+ class2type = {};
+
+jQuery.fn = jQuery.prototype = {
+ constructor: jQuery,
+ init: function( selector, context, rootjQuery ) {
+ var match, elem, ret, doc;
+
+ // Handle $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Handle $(DOMElement)
+ if ( selector.nodeType ) {
+ this.context = this[0] = selector;
+ this.length = 1;
+ return this;
+ }
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) {
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && (match[1] || !context) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[1] ) {
+ context = context instanceof jQuery ? context[0] : context;
+ doc = ( context && context.nodeType ? context.ownerDocument || context : document );
+
+ // scripts is true for back-compat
+ selector = jQuery.parseHTML( match[1], doc, true );
+ if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) {
+ this.attr.call( selector, context, true );
+ }
+
+ return jQuery.merge( this, selector );
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[2] );
+
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ if ( elem && elem.parentNode ) {
+ // Handle the case where IE and Opera return items
+ // by name instead of ID
+ if ( elem.id !== match[2] ) {
+ return rootjQuery.find( selector );
+ }
+
+ // Otherwise, we inject the element directly into the jQuery object
+ this.length = 1;
+ this[0] = elem;
+ }
+
+ this.context = document;
+ this.selector = selector;
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || rootjQuery ).find( selector );
+
+ // HANDLE: $(expr, context)
+				// (which is just equivalent to: $(context).find(expr))
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) ) {
+ return rootjQuery.ready( selector );
+ }
+
+ if ( selector.selector !== undefined ) {
+ this.selector = selector.selector;
+ this.context = selector.context;
+ }
+
+ return jQuery.makeArray( selector, this );
+ },
+
+ // Start with an empty selector
+ selector: "",
+
+ // The current version of jQuery being used
+ jquery: "1.8.3",
+
+ // The default length of a jQuery object is 0
+ length: 0,
+
+ // The number of elements contained in the matched element set
+ size: function() {
+ return this.length;
+ },
+
+ toArray: function() {
+ return core_slice.call( this );
+ },
+
+ // Get the Nth element in the matched element set OR
+ // Get the whole matched element set as a clean array
+ get: function( num ) {
+ return num == null ?
+
+ // Return a 'clean' array
+ this.toArray() :
+
+ // Return just the object
+ ( num < 0 ? this[ this.length + num ] : this[ num ] );
+ },
+
+ // Take an array of elements and push it onto the stack
+ // (returning the new matched element set)
+ pushStack: function( elems, name, selector ) {
+
+ // Build a new jQuery matched element set
+ var ret = jQuery.merge( this.constructor(), elems );
+
+ // Add the old object onto the stack (as a reference)
+ ret.prevObject = this;
+
+ ret.context = this.context;
+
+ if ( name === "find" ) {
+ ret.selector = this.selector + ( this.selector ? " " : "" ) + selector;
+ } else if ( name ) {
+ ret.selector = this.selector + "." + name + "(" + selector + ")";
+ }
+
+ // Return the newly-formed element set
+ return ret;
+ },
+
+ // Execute a callback for every element in the matched set.
+ // (You can seed the arguments with an array of args, but this is
+ // only used internally.)
+ each: function( callback, args ) {
+ return jQuery.each( this, callback, args );
+ },
+
+ ready: function( fn ) {
+ // Add the callback
+ jQuery.ready.promise().done( fn );
+
+ return this;
+ },
+
+ eq: function( i ) {
+ i = +i;
+ return i === -1 ?
+ this.slice( i ) :
+ this.slice( i, i + 1 );
+ },
+
+ first: function() {
+ return this.eq( 0 );
+ },
+
+ last: function() {
+ return this.eq( -1 );
+ },
+
+ slice: function() {
+ return this.pushStack( core_slice.apply( this, arguments ),
+ "slice", core_slice.call(arguments).join(",") );
+ },
+
+ map: function( callback ) {
+ return this.pushStack( jQuery.map(this, function( elem, i ) {
+ return callback.call( elem, i, elem );
+ }));
+ },
+
+ end: function() {
+ return this.prevObject || this.constructor(null);
+ },
+
+ // For internal use only.
+ // Behaves like an Array's method, not like a jQuery method.
+ push: core_push,
+ sort: [].sort,
+ splice: [].splice
+};
+
+// Give the init function the jQuery prototype for later instantiation
+jQuery.fn.init.prototype = jQuery.fn;
+
+jQuery.extend = jQuery.fn.extend = function() {
+ var options, name, src, copy, copyIsArray, clone,
+ target = arguments[0] || {},
+ i = 1,
+ length = arguments.length,
+ deep = false;
+
+ // Handle a deep copy situation
+ if ( typeof target === "boolean" ) {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ }
+
+	// Handle case when target is a string or other primitive (possible in deep copy)
+ if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
+ target = {};
+ }
+
+ // extend jQuery itself if only one argument is passed
+ if ( length === i ) {
+ target = this;
+ --i;
+ }
+
+ for ( ; i < length; i++ ) {
+ // Only deal with non-null/undefined values
+ if ( (options = arguments[ i ]) != null ) {
+ // Extend the base object
+ for ( name in options ) {
+ src = target[ name ];
+ copy = options[ name ];
+
+ // Prevent never-ending loop
+ if ( target === copy ) {
+ continue;
+ }
+
+ // Recurse if we're merging plain objects or arrays
+ if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) {
+ if ( copyIsArray ) {
+ copyIsArray = false;
+ clone = src && jQuery.isArray(src) ? src : [];
+
+ } else {
+ clone = src && jQuery.isPlainObject(src) ? src : {};
+ }
+
+ // Never move original objects, clone them
+ target[ name ] = jQuery.extend( deep, clone, copy );
+
+ // Don't bring in undefined values
+ } else if ( copy !== undefined ) {
+ target[ name ] = copy;
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
+
+jQuery.extend({
+ noConflict: function( deep ) {
+ if ( window.$ === jQuery ) {
+ window.$ = _$;
+ }
+
+ if ( deep && window.jQuery === jQuery ) {
+ window.jQuery = _jQuery;
+ }
+
+ return jQuery;
+ },
+
+ // Is the DOM ready to be used? Set to true once it occurs.
+ isReady: false,
+
+ // A counter to track how many items to wait for before
+ // the ready event fires. See #6781
+ readyWait: 1,
+
+ // Hold (or release) the ready event
+ holdReady: function( hold ) {
+ if ( hold ) {
+ jQuery.readyWait++;
+ } else {
+ jQuery.ready( true );
+ }
+ },
+
+ // Handle when the DOM is ready
+ ready: function( wait ) {
+
+ // Abort if there are pending holds or we're already ready
+ if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
+ return;
+ }
+
+ // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
+ if ( !document.body ) {
+ return setTimeout( jQuery.ready, 1 );
+ }
+
+ // Remember that the DOM is ready
+ jQuery.isReady = true;
+
+ // If a normal DOM Ready event fired, decrement, and wait if need be
+ if ( wait !== true && --jQuery.readyWait > 0 ) {
+ return;
+ }
+
+		// If there are functions bound, execute them
+ readyList.resolveWith( document, [ jQuery ] );
+
+ // Trigger any bound ready events
+ if ( jQuery.fn.trigger ) {
+ jQuery( document ).trigger("ready").off("ready");
+ }
+ },
+
+ // See test/unit/core.js for details concerning isFunction.
+ // Since version 1.3, DOM methods and functions like alert
+ // aren't supported. They return false on IE (#2968).
+ isFunction: function( obj ) {
+ return jQuery.type(obj) === "function";
+ },
+
+ isArray: Array.isArray || function( obj ) {
+ return jQuery.type(obj) === "array";
+ },
+
+ isWindow: function( obj ) {
+ return obj != null && obj == obj.window;
+ },
+
+ isNumeric: function( obj ) {
+ return !isNaN( parseFloat(obj) ) && isFinite( obj );
+ },
+
+ type: function( obj ) {
+ return obj == null ?
+ String( obj ) :
+ class2type[ core_toString.call(obj) ] || "object";
+ },
+
+ isPlainObject: function( obj ) {
+ // Must be an Object.
+ // Because of IE, we also have to check the presence of the constructor property.
+ // Make sure that DOM nodes and window objects don't pass through, as well
+ if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) {
+ return false;
+ }
+
+ try {
+ // Not own constructor property must be Object
+ if ( obj.constructor &&
+ !core_hasOwn.call(obj, "constructor") &&
+ !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) {
+ return false;
+ }
+ } catch ( e ) {
+ // IE8,9 Will throw exceptions on certain host objects #9897
+ return false;
+ }
+
+		// Own properties are enumerated first, so to speed up,
+		// if the last one is own, then all properties are own.
+
+ var key;
+ for ( key in obj ) {}
+
+ return key === undefined || core_hasOwn.call( obj, key );
+ },
+
+ isEmptyObject: function( obj ) {
+ var name;
+ for ( name in obj ) {
+ return false;
+ }
+ return true;
+ },
+
+ error: function( msg ) {
+ throw new Error( msg );
+ },
+
+ // data: string of html
+ // context (optional): If specified, the fragment will be created in this context, defaults to document
+ // scripts (optional): If true, will include scripts passed in the html string
+ parseHTML: function( data, context, scripts ) {
+ var parsed;
+ if ( !data || typeof data !== "string" ) {
+ return null;
+ }
+ if ( typeof context === "boolean" ) {
+ scripts = context;
+ context = 0;
+ }
+ context = context || document;
+
+ // Single tag
+ if ( (parsed = rsingleTag.exec( data )) ) {
+ return [ context.createElement( parsed[1] ) ];
+ }
+
+ parsed = jQuery.buildFragment( [ data ], context, scripts ? null : [] );
+ return jQuery.merge( [],
+ (parsed.cacheable ? jQuery.clone( parsed.fragment ) : parsed.fragment).childNodes );
+ },
+
+ parseJSON: function( data ) {
+ if ( !data || typeof data !== "string") {
+ return null;
+ }
+
+ // Make sure leading/trailing whitespace is removed (IE can't handle it)
+ data = jQuery.trim( data );
+
+ // Attempt to parse using the native JSON parser first
+ if ( window.JSON && window.JSON.parse ) {
+ return window.JSON.parse( data );
+ }
+
+ // Make sure the incoming data is actual JSON
+ // Logic borrowed from http://json.org/json2.js
+ if ( rvalidchars.test( data.replace( rvalidescape, "@" )
+ .replace( rvalidtokens, "]" )
+ .replace( rvalidbraces, "")) ) {
+
+ return ( new Function( "return " + data ) )();
+
+ }
+ jQuery.error( "Invalid JSON: " + data );
+ },
+
+ // Cross-browser xml parsing
+ parseXML: function( data ) {
+ var xml, tmp;
+ if ( !data || typeof data !== "string" ) {
+ return null;
+ }
+ try {
+ if ( window.DOMParser ) { // Standard
+ tmp = new DOMParser();
+ xml = tmp.parseFromString( data , "text/xml" );
+ } else { // IE
+ xml = new ActiveXObject( "Microsoft.XMLDOM" );
+ xml.async = "false";
+ xml.loadXML( data );
+ }
+ } catch( e ) {
+ xml = undefined;
+ }
+ if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) {
+ jQuery.error( "Invalid XML: " + data );
+ }
+ return xml;
+ },
+
+ noop: function() {},
+
+ // Evaluates a script in a global context
+ // Workarounds based on findings by Jim Driscoll
+ // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context
+ globalEval: function( data ) {
+ if ( data && core_rnotwhite.test( data ) ) {
+ // We use execScript on Internet Explorer
+ // We use an anonymous function so that context is window
+ // rather than jQuery in Firefox
+ ( window.execScript || function( data ) {
+ window[ "eval" ].call( window, data );
+ } )( data );
+ }
+ },
+
+ // Convert dashed to camelCase; used by the css and data modules
+ // Microsoft forgot to hump their vendor prefix (#9572)
+ camelCase: function( string ) {
+ return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
+ },
+
+ nodeName: function( elem, name ) {
+ return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
+ },
+
+ // args is for internal usage only
+ each: function( obj, callback, args ) {
+ var name,
+ i = 0,
+ length = obj.length,
+ isObj = length === undefined || jQuery.isFunction( obj );
+
+ if ( args ) {
+ if ( isObj ) {
+ for ( name in obj ) {
+ if ( callback.apply( obj[ name ], args ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( ; i < length; ) {
+ if ( callback.apply( obj[ i++ ], args ) === false ) {
+ break;
+ }
+ }
+ }
+
+ // A special, fast, case for the most common use of each
+ } else {
+ if ( isObj ) {
+ for ( name in obj ) {
+ if ( callback.call( obj[ name ], name, obj[ name ] ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( ; i < length; ) {
+ if ( callback.call( obj[ i ], i, obj[ i++ ] ) === false ) {
+ break;
+ }
+ }
+ }
+ }
+
+ return obj;
+ },
+
+ // Use native String.trim function wherever possible
+ trim: core_trim && !core_trim.call("\uFEFF\xA0") ?
+ function( text ) {
+ return text == null ?
+ "" :
+ core_trim.call( text );
+ } :
+
+ // Otherwise use our own trimming functionality
+ function( text ) {
+ return text == null ?
+ "" :
+ ( text + "" ).replace( rtrim, "" );
+ },
+
+ // results is for internal usage only
+ makeArray: function( arr, results ) {
+ var type,
+ ret = results || [];
+
+ if ( arr != null ) {
+ // The window, strings (and functions) also have 'length'
+ // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930
+ type = jQuery.type( arr );
+
+ if ( arr.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( arr ) ) {
+ core_push.call( ret, arr );
+ } else {
+ jQuery.merge( ret, arr );
+ }
+ }
+
+ return ret;
+ },
+
+ inArray: function( elem, arr, i ) {
+ var len;
+
+ if ( arr ) {
+ if ( core_indexOf ) {
+ return core_indexOf.call( arr, elem, i );
+ }
+
+ len = arr.length;
+ i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0;
+
+ for ( ; i < len; i++ ) {
+ // Skip accessing in sparse arrays
+ if ( i in arr && arr[ i ] === elem ) {
+ return i;
+ }
+ }
+ }
+
+ return -1;
+ },
+
+ merge: function( first, second ) {
+ var l = second.length,
+ i = first.length,
+ j = 0;
+
+ if ( typeof l === "number" ) {
+ for ( ; j < l; j++ ) {
+ first[ i++ ] = second[ j ];
+ }
+
+ } else {
+ while ( second[j] !== undefined ) {
+ first[ i++ ] = second[ j++ ];
+ }
+ }
+
+ first.length = i;
+
+ return first;
+ },
+
+ grep: function( elems, callback, inv ) {
+ var retVal,
+ ret = [],
+ i = 0,
+ length = elems.length;
+ inv = !!inv;
+
+ // Go through the array, only saving the items
+ // that pass the validator function
+ for ( ; i < length; i++ ) {
+ retVal = !!callback( elems[ i ], i );
+ if ( inv !== retVal ) {
+ ret.push( elems[ i ] );
+ }
+ }
+
+ return ret;
+ },
+
+ // arg is for internal usage only
+ map: function( elems, callback, arg ) {
+ var value, key,
+ ret = [],
+ i = 0,
+ length = elems.length,
+ // jquery objects are treated as arrays
+ isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ;
+
+		// Go through the array, translating each of the items to their new values
+ if ( isArray ) {
+ for ( ; i < length; i++ ) {
+ value = callback( elems[ i ], i, arg );
+
+ if ( value != null ) {
+ ret[ ret.length ] = value;
+ }
+ }
+
+ // Go through every key on the object,
+ } else {
+ for ( key in elems ) {
+ value = callback( elems[ key ], key, arg );
+
+ if ( value != null ) {
+ ret[ ret.length ] = value;
+ }
+ }
+ }
+
+ // Flatten any nested arrays
+ return ret.concat.apply( [], ret );
+ },
+
+ // A global GUID counter for objects
+ guid: 1,
+
+ // Bind a function to a context, optionally partially applying any
+ // arguments.
+ proxy: function( fn, context ) {
+ var tmp, args, proxy;
+
+ if ( typeof context === "string" ) {
+ tmp = fn[ context ];
+ context = fn;
+ fn = tmp;
+ }
+
+		// Quick check to determine if target is callable; in the spec
+ // this throws a TypeError, but we will just return undefined.
+ if ( !jQuery.isFunction( fn ) ) {
+ return undefined;
+ }
+
+ // Simulated bind
+ args = core_slice.call( arguments, 2 );
+ proxy = function() {
+ return fn.apply( context, args.concat( core_slice.call( arguments ) ) );
+ };
+
+ // Set the guid of unique handler to the same of original handler, so it can be removed
+ proxy.guid = fn.guid = fn.guid || jQuery.guid++;
+
+ return proxy;
+ },
+
+ // Multifunctional method to get and set values of a collection
+ // The value/s can optionally be executed if it's a function
+ access: function( elems, fn, key, value, chainable, emptyGet, pass ) {
+ var exec,
+ bulk = key == null,
+ i = 0,
+ length = elems.length;
+
+ // Sets many values
+ if ( key && typeof key === "object" ) {
+ for ( i in key ) {
+ jQuery.access( elems, fn, i, key[i], 1, emptyGet, value );
+ }
+ chainable = 1;
+
+ // Sets one value
+ } else if ( value !== undefined ) {
+ // Optionally, function values get executed if exec is true
+ exec = pass === undefined && jQuery.isFunction( value );
+
+ if ( bulk ) {
+ // Bulk operations only iterate when executing function values
+ if ( exec ) {
+ exec = fn;
+ fn = function( elem, key, value ) {
+ return exec.call( jQuery( elem ), value );
+ };
+
+ // Otherwise they run against the entire set
+ } else {
+ fn.call( elems, value );
+ fn = null;
+ }
+ }
+
+ if ( fn ) {
+ for (; i < length; i++ ) {
+ fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass );
+ }
+ }
+
+ chainable = 1;
+ }
+
+ return chainable ?
+ elems :
+
+ // Gets
+ bulk ?
+ fn.call( elems ) :
+ length ? fn( elems[0], key ) : emptyGet;
+ },
+
+ now: function() {
+ return ( new Date() ).getTime();
+ }
+});
+
+jQuery.ready.promise = function( obj ) {
+ if ( !readyList ) {
+
+ readyList = jQuery.Deferred();
+
+ // Catch cases where $(document).ready() is called after the browser event has already occurred.
+ // we once tried to use readyState "interactive" here, but it caused issues like the one
+ // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15
+ if ( document.readyState === "complete" ) {
+ // Handle it asynchronously to allow scripts the opportunity to delay ready
+ setTimeout( jQuery.ready, 1 );
+
+ // Standards-based browsers support DOMContentLoaded
+ } else if ( document.addEventListener ) {
+ // Use the handy event callback
+ document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+
+ // A fallback to window.onload, that will always work
+ window.addEventListener( "load", jQuery.ready, false );
+
+ // If IE event model is used
+ } else {
+ // Ensure firing before onload, maybe late but safe also for iframes
+ document.attachEvent( "onreadystatechange", DOMContentLoaded );
+
+ // A fallback to window.onload, that will always work
+ window.attachEvent( "onload", jQuery.ready );
+
+ // If IE and not a frame
+ // continually check to see if the document is ready
+ var top = false;
+
+ try {
+ top = window.frameElement == null && document.documentElement;
+ } catch(e) {}
+
+ if ( top && top.doScroll ) {
+ (function doScrollCheck() {
+ if ( !jQuery.isReady ) {
+
+ try {
+ // Use the trick by Diego Perini
+ // http://javascript.nwbox.com/IEContentLoaded/
+ top.doScroll("left");
+ } catch(e) {
+ return setTimeout( doScrollCheck, 50 );
+ }
+
+ // and execute any waiting functions
+ jQuery.ready();
+ }
+ })();
+ }
+ }
+ }
+ return readyList.promise( obj );
+};
+
+// Populate the class2type map
+jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) {
+ class2type[ "[object " + name + "]" ] = name.toLowerCase();
+});
+
+// All jQuery objects should point back to these
+rootjQuery = jQuery(document);
+// String to Object options format cache
+var optionsCache = {};
+
+// Convert String-formatted options into Object-formatted ones and store in cache
+function createOptions( options ) {
+ var object = optionsCache[ options ] = {};
+ jQuery.each( options.split( core_rspace ), function( _, flag ) {
+ object[ flag ] = true;
+ });
+ return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ * options: an optional list of space-separated options that will change how
+ * the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ * once: will ensure the callback list can only be fired once (like a Deferred)
+ *
+ * memory: will keep track of previous values and will call any callback added
+ * after the list has been fired right away with the latest "memorized"
+ * values (like a Deferred)
+ *
+ * unique: will ensure a callback can only be added once (no duplicate in the list)
+ *
+ * stopOnFalse: interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+ // Convert options from String-formatted to Object-formatted if needed
+ // (we check in cache first)
+ options = typeof options === "string" ?
+ ( optionsCache[ options ] || createOptions( options ) ) :
+ jQuery.extend( {}, options );
+
+ var // Last fire value (for non-forgettable lists)
+ memory,
+ // Flag to know if list was already fired
+ fired,
+ // Flag to know if list is currently firing
+ firing,
+ // First callback to fire (used internally by add and fireWith)
+ firingStart,
+ // End of the loop when firing
+ firingLength,
+ // Index of currently firing callback (modified by remove if needed)
+ firingIndex,
+ // Actual callback list
+ list = [],
+ // Stack of fire calls for repeatable lists
+ stack = !options.once && [],
+ // Fire callbacks
+ fire = function( data ) {
+ memory = options.memory && data;
+ fired = true;
+ firingIndex = firingStart || 0;
+ firingStart = 0;
+ firingLength = list.length;
+ firing = true;
+ for ( ; list && firingIndex < firingLength; firingIndex++ ) {
+ if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) {
+ memory = false; // To prevent further calls using add
+ break;
+ }
+ }
+ firing = false;
+ if ( list ) {
+ if ( stack ) {
+ if ( stack.length ) {
+ fire( stack.shift() );
+ }
+ } else if ( memory ) {
+ list = [];
+ } else {
+ self.disable();
+ }
+ }
+ },
+ // Actual Callbacks object
+ self = {
+ // Add a callback or a collection of callbacks to the list
+ add: function() {
+ if ( list ) {
+ // First, we save the current length
+ var start = list.length;
+ (function add( args ) {
+ jQuery.each( args, function( _, arg ) {
+ var type = jQuery.type( arg );
+ if ( type === "function" ) {
+ if ( !options.unique || !self.has( arg ) ) {
+ list.push( arg );
+ }
+ } else if ( arg && arg.length && type !== "string" ) {
+ // Inspect recursively
+ add( arg );
+ }
+ });
+ })( arguments );
+ // Do we need to add the callbacks to the
+ // current firing batch?
+ if ( firing ) {
+ firingLength = list.length;
+ // With memory, if we're not firing then
+ // we should call right away
+ } else if ( memory ) {
+ firingStart = start;
+ fire( memory );
+ }
+ }
+ return this;
+ },
+ // Remove a callback from the list
+ remove: function() {
+ if ( list ) {
+ jQuery.each( arguments, function( _, arg ) {
+ var index;
+ while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
+ list.splice( index, 1 );
+ // Handle firing indexes
+ if ( firing ) {
+ if ( index <= firingLength ) {
+ firingLength--;
+ }
+ if ( index <= firingIndex ) {
+ firingIndex--;
+ }
+ }
+ }
+ });
+ }
+ return this;
+ },
+ // Control if a given callback is in the list
+ has: function( fn ) {
+ return jQuery.inArray( fn, list ) > -1;
+ },
+ // Remove all callbacks from the list
+ empty: function() {
+ list = [];
+ return this;
+ },
+ // Have the list do nothing anymore
+ disable: function() {
+ list = stack = memory = undefined;
+ return this;
+ },
+ // Is it disabled?
+ disabled: function() {
+ return !list;
+ },
+ // Lock the list in its current state
+ lock: function() {
+ stack = undefined;
+ if ( !memory ) {
+ self.disable();
+ }
+ return this;
+ },
+ // Is it locked?
+ locked: function() {
+ return !stack;
+ },
+ // Call all callbacks with the given context and arguments
+ fireWith: function( context, args ) {
+ args = args || [];
+ args = [ context, args.slice ? args.slice() : args ];
+ if ( list && ( !fired || stack ) ) {
+ if ( firing ) {
+ stack.push( args );
+ } else {
+ fire( args );
+ }
+ }
+ return this;
+ },
+ // Call all the callbacks with the given arguments
+ fire: function() {
+ self.fireWith( this, arguments );
+ return this;
+ },
+ // To know if the callbacks have already been called at least once
+ fired: function() {
+ return !!fired;
+ }
+ };
+
+ return self;
+};
+jQuery.extend({
+
+ Deferred: function( func ) {
+ var tuples = [
+ // action, add listener, listener list, final state
+ [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ],
+ [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ],
+ [ "notify", "progress", jQuery.Callbacks("memory") ]
+ ],
+ state = "pending",
+ promise = {
+ state: function() {
+ return state;
+ },
+ always: function() {
+ deferred.done( arguments ).fail( arguments );
+ return this;
+ },
+ then: function( /* fnDone, fnFail, fnProgress */ ) {
+ var fns = arguments;
+ return jQuery.Deferred(function( newDefer ) {
+ jQuery.each( tuples, function( i, tuple ) {
+ var action = tuple[ 0 ],
+ fn = fns[ i ];
+ // deferred[ done | fail | progress ] for forwarding actions to newDefer
+ deferred[ tuple[1] ]( jQuery.isFunction( fn ) ?
+ function() {
+ var returned = fn.apply( this, arguments );
+ if ( returned && jQuery.isFunction( returned.promise ) ) {
+ returned.promise()
+ .done( newDefer.resolve )
+ .fail( newDefer.reject )
+ .progress( newDefer.notify );
+ } else {
+ newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] );
+ }
+ } :
+ newDefer[ action ]
+ );
+ });
+ fns = null;
+ }).promise();
+ },
+ // Get a promise for this deferred
+ // If obj is provided, the promise aspect is added to the object
+ promise: function( obj ) {
+ return obj != null ? jQuery.extend( obj, promise ) : promise;
+ }
+ },
+ deferred = {};
+
+ // Keep pipe for back-compat
+ promise.pipe = promise.then;
+
+ // Add list-specific methods
+ jQuery.each( tuples, function( i, tuple ) {
+ var list = tuple[ 2 ],
+ stateString = tuple[ 3 ];
+
+ // promise[ done | fail | progress ] = list.add
+ promise[ tuple[1] ] = list.add;
+
+ // Handle state
+ if ( stateString ) {
+ list.add(function() {
+ // state = [ resolved | rejected ]
+ state = stateString;
+
+ // [ reject_list | resolve_list ].disable; progress_list.lock
+ }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock );
+ }
+
+ // deferred[ resolve | reject | notify ] = list.fire
+ deferred[ tuple[0] ] = list.fire;
+ deferred[ tuple[0] + "With" ] = list.fireWith;
+ });
+
+ // Make the deferred a promise
+ promise.promise( deferred );
+
+ // Call given func if any
+ if ( func ) {
+ func.call( deferred, deferred );
+ }
+
+ // All done!
+ return deferred;
+ },
+
+ // Deferred helper
+ when: function( subordinate /* , ..., subordinateN */ ) {
+ var i = 0,
+ resolveValues = core_slice.call( arguments ),
+ length = resolveValues.length,
+
+ // the count of uncompleted subordinates
+ remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0,
+
+ // the master Deferred. If resolveValues consist of only a single Deferred, just use that.
+ deferred = remaining === 1 ? subordinate : jQuery.Deferred(),
+
+ // Update function for both resolve and progress values
+ updateFunc = function( i, contexts, values ) {
+ return function( value ) {
+ contexts[ i ] = this;
+ values[ i ] = arguments.length > 1 ? core_slice.call( arguments ) : value;
+ if( values === progressValues ) {
+ deferred.notifyWith( contexts, values );
+ } else if ( !( --remaining ) ) {
+ deferred.resolveWith( contexts, values );
+ }
+ };
+ },
+
+ progressValues, progressContexts, resolveContexts;
+
+ // add listeners to Deferred subordinates; treat others as resolved
+ if ( length > 1 ) {
+ progressValues = new Array( length );
+ progressContexts = new Array( length );
+ resolveContexts = new Array( length );
+ for ( ; i < length; i++ ) {
+ if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) {
+ resolveValues[ i ].promise()
+ .done( updateFunc( i, resolveContexts, resolveValues ) )
+ .fail( deferred.reject )
+ .progress( updateFunc( i, progressContexts, progressValues ) );
+ } else {
+ --remaining;
+ }
+ }
+ }
+
+ // if we're not waiting on anything, resolve the master
+ if ( !remaining ) {
+ deferred.resolveWith( resolveContexts, resolveValues );
+ }
+
+ return deferred.promise();
+ }
+});
+jQuery.support = (function() {
+
+ var support,
+ all,
+ a,
+ select,
+ opt,
+ input,
+ fragment,
+ eventName,
+ i,
+ isSupported,
+ clickFn,
+ div = document.createElement("div");
+
+ // Setup
+ div.setAttribute( "className", "t" );
+ div.innerHTML = " <link/><table></table><a href='/a'>a</a><input type='checkbox'/>";
+
+ // Support tests won't run in some limited or non-browser environments
+ all = div.getElementsByTagName("*");
+ a = div.getElementsByTagName("a")[ 0 ];
+ if ( !all || !a || !all.length ) {
+ return {};
+ }
+
+ // First batch of tests
+ select = document.createElement("select");
+ opt = select.appendChild( document.createElement("option") );
+ input = div.getElementsByTagName("input")[ 0 ];
+
+ a.style.cssText = "top:1px;float:left;opacity:.5";
+ support = {
+ // IE strips leading whitespace when .innerHTML is used
+ leadingWhitespace: ( div.firstChild.nodeType === 3 ),
+
+ // Make sure that tbody elements aren't automatically inserted
+ // IE will insert them into empty tables
+ tbody: !div.getElementsByTagName("tbody").length,
+
+ // Make sure that link elements get serialized correctly by innerHTML
+ // This requires a wrapper element in IE
+ htmlSerialize: !!div.getElementsByTagName("link").length,
+
+ // Get the style information from getAttribute
+ // (IE uses .cssText instead)
+ style: /top/.test( a.getAttribute("style") ),
+
+ // Make sure that URLs aren't manipulated
+ // (IE normalizes it by default)
+ hrefNormalized: ( a.getAttribute("href") === "/a" ),
+
+ // Make sure that element opacity exists
+ // (IE uses filter instead)
+ // Use a regex to work around a WebKit issue. See #5145
+ opacity: /^0.5/.test( a.style.opacity ),
+
+ // Verify style float existence
+ // (IE uses styleFloat instead of cssFloat)
+ cssFloat: !!a.style.cssFloat,
+
+ // Make sure that if no value is specified for a checkbox
+ // that it defaults to "on".
+ // (WebKit defaults to "" instead)
+ checkOn: ( input.value === "on" ),
+
+ // Make sure that a selected-by-default option has a working selected property.
+ // (WebKit defaults to false instead of true, IE too, if it's in an optgroup)
+ optSelected: opt.selected,
+
+ // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7)
+ getSetAttribute: div.className !== "t",
+
+ // Tests for enctype support on a form (#6743)
+ enctype: !!document.createElement("form").enctype,
+
+ // Makes sure cloning an html5 element does not cause problems
+ // Where outerHTML is undefined, this still works
+ html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav></:nav>",
+
+ // jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode
+ boxModel: ( document.compatMode === "CSS1Compat" ),
+
+ // Will be defined later
+ submitBubbles: true,
+ changeBubbles: true,
+ focusinBubbles: false,
+ deleteExpando: true,
+ noCloneEvent: true,
+ inlineBlockNeedsLayout: false,
+ shrinkWrapBlocks: false,
+ reliableMarginRight: true,
+ boxSizingReliable: true,
+ pixelPosition: false
+ };
+
+ // Make sure checked status is properly cloned
+ input.checked = true;
+ support.noCloneChecked = input.cloneNode( true ).checked;
+
+ // Make sure that the options inside disabled selects aren't marked as disabled
+ // (WebKit marks them as disabled)
+ select.disabled = true;
+ support.optDisabled = !opt.disabled;
+
+ // Test to see if it's possible to delete an expando from an element
+ // Fails in Internet Explorer
+ try {
+ delete div.test;
+ } catch( e ) {
+ support.deleteExpando = false;
+ }
+
+ if ( !div.addEventListener && div.attachEvent && div.fireEvent ) {
+ div.attachEvent( "onclick", clickFn = function() {
+ // Cloning a node shouldn't copy over any
+ // bound event handlers (IE does this)
+ support.noCloneEvent = false;
+ });
+ div.cloneNode( true ).fireEvent("onclick");
+ div.detachEvent( "onclick", clickFn );
+ }
+
+ // Check if a radio maintains its value
+ // after being appended to the DOM
+ input = document.createElement("input");
+ input.value = "t";
+ input.setAttribute( "type", "radio" );
+ support.radioValue = input.value === "t";
+
+ input.setAttribute( "checked", "checked" );
+
+ // #11217 - WebKit loses check when the name is after the checked attribute
+ input.setAttribute( "name", "t" );
+
+ div.appendChild( input );
+ fragment = document.createDocumentFragment();
+ fragment.appendChild( div.lastChild );
+
+ // WebKit doesn't clone checked state correctly in fragments
+ support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+ // Check if a disconnected checkbox will retain its checked
+ // value of true after appended to the DOM (IE6/7)
+ support.appendChecked = input.checked;
+
+ fragment.removeChild( input );
+ fragment.appendChild( div );
+
+ // Technique from Juriy Zaytsev
+ // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/
+ // We only care about the case where non-standard event systems
+ // are used, namely in IE. Short-circuiting here helps us to
+ // avoid an eval call (in setAttribute) which can cause CSP
+ // to go haywire. See: https://developer.mozilla.org/en/Security/CSP
+ if ( div.attachEvent ) {
+ for ( i in {
+ submit: true,
+ change: true,
+ focusin: true
+ }) {
+ eventName = "on" + i;
+ isSupported = ( eventName in div );
+ if ( !isSupported ) {
+ div.setAttribute( eventName, "return;" );
+ isSupported = ( typeof div[ eventName ] === "function" );
+ }
+ support[ i + "Bubbles" ] = isSupported;
+ }
+ }
+
+ // Run tests that need a body at doc ready
+ jQuery(function() {
+ var container, div, tds, marginDiv,
+ divReset = "padding:0;margin:0;border:0;display:block;overflow:hidden;",
+ body = document.getElementsByTagName("body")[0];
+
+ if ( !body ) {
+ // Return for frameset docs that don't have a body
+ return;
+ }
+
+ container = document.createElement("div");
+ container.style.cssText = "visibility:hidden;border:0;width:0;height:0;position:static;top:0;margin-top:1px";
+ body.insertBefore( container, body.firstChild );
+
+ // Construct the test element
+ div = document.createElement("div");
+ container.appendChild( div );
+
+ // Check if table cells still have offsetWidth/Height when they are set
+ // to display:none and there are still other visible table cells in a
+ // table row; if so, offsetWidth/Height are not reliable for use when
+ // determining if an element has been hidden directly using
+ // display:none (it is still safe to use offsets if a parent element is
+ // hidden; don safety goggles and see bug #4512 for more information).
+ // (only IE 8 fails this test)
+ div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>";
+ tds = div.getElementsByTagName("td");
+ tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none";
+ isSupported = ( tds[ 0 ].offsetHeight === 0 );
+
+ tds[ 0 ].style.display = "";
+ tds[ 1 ].style.display = "none";
+
+ // Check if empty table cells still have offsetWidth/Height
+ // (IE <= 8 fail this test)
+ support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );
+
+ // Check box-sizing and margin behavior
+ div.innerHTML = "";
+ div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;";
+ support.boxSizing = ( div.offsetWidth === 4 );
+ support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 );
+
+		// NOTE: To any future maintainer, we've guarded window.getComputedStyle
+ // because jsdom on node.js will break without it.
+ if ( window.getComputedStyle ) {
+ support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%";
+ support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px";
+
+ // Check if div with explicit width and no margin-right incorrectly
+ // gets computed margin-right based on width of container. For more
+ // info see bug #3333
+ // Fails in WebKit before Feb 2011 nightlies
+ // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
+ marginDiv = document.createElement("div");
+ marginDiv.style.cssText = div.style.cssText = divReset;
+ marginDiv.style.marginRight = marginDiv.style.width = "0";
+ div.style.width = "1px";
+ div.appendChild( marginDiv );
+ support.reliableMarginRight =
+ !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight );
+ }
+
+ if ( typeof div.style.zoom !== "undefined" ) {
+ // Check if natively block-level elements act like inline-block
+ // elements when setting their display to 'inline' and giving
+ // them layout
+ // (IE < 8 does this)
+ div.innerHTML = "";
+ div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1";
+ support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 );
+
+ // Check if elements with layout shrink-wrap their children
+ // (IE 6 does this)
+ div.style.display = "block";
+ div.style.overflow = "visible";
+ div.innerHTML = "<div></div>";
+ div.firstChild.style.width = "5px";
+ support.shrinkWrapBlocks = ( div.offsetWidth !== 3 );
+
+ container.style.zoom = 1;
+ }
+
+ // Null elements to avoid leaks in IE
+ body.removeChild( container );
+ container = div = tds = marginDiv = null;
+ });
+
+ // Null elements to avoid leaks in IE
+ fragment.removeChild( div );
+ all = a = select = opt = input = fragment = div = null;
+
+ return support;
+})();
+var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/,
+ rmultiDash = /([A-Z])/g;
+
+jQuery.extend({
+ cache: {},
+
+ deletedIds: [],
+
+ // Remove at next major release (1.9/2.0)
+ uuid: 0,
+
+ // Unique for each copy of jQuery on the page
+ // Non-digits removed to match rinlinejQuery
+ expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ),
+
+ // The following elements throw uncatchable exceptions if you
+ // attempt to add expando properties to them.
+ noData: {
+ "embed": true,
+ // Ban all objects except for Flash (which handle expandos)
+ "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",
+ "applet": true
+ },
+
+ hasData: function( elem ) {
+ elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ];
+ return !!elem && !isEmptyDataObject( elem );
+ },
+
+ data: function( elem, name, data, pvt /* Internal Use Only */ ) {
+ if ( !jQuery.acceptData( elem ) ) {
+ return;
+ }
+
+ var thisCache, ret,
+ internalKey = jQuery.expando,
+ getByName = typeof name === "string",
+
+ // We have to handle DOM nodes and JS objects differently because IE6-7
+ // can't GC object references properly across the DOM-JS boundary
+ isNode = elem.nodeType,
+
+ // Only DOM nodes need the global jQuery cache; JS object data is
+ // attached directly to the object so GC can occur automatically
+ cache = isNode ? jQuery.cache : elem,
+
+ // Only defining an ID for JS objects if its cache already exists allows
+ // the code to shortcut on the same path as a DOM node with no cache
+ id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey;
+
+ // Avoid doing any more work than we need to when trying to get data on an
+ // object that has no data at all
+ if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) {
+ return;
+ }
+
+ if ( !id ) {
+ // Only DOM nodes need a new unique ID for each element since their data
+ // ends up in the global cache
+ if ( isNode ) {
+ elem[ internalKey ] = id = jQuery.deletedIds.pop() || jQuery.guid++;
+ } else {
+ id = internalKey;
+ }
+ }
+
+ if ( !cache[ id ] ) {
+ cache[ id ] = {};
+
+ // Avoids exposing jQuery metadata on plain JS objects when the object
+ // is serialized using JSON.stringify
+ if ( !isNode ) {
+ cache[ id ].toJSON = jQuery.noop;
+ }
+ }
+
+ // An object can be passed to jQuery.data instead of a key/value pair; this gets
+ // shallow copied over onto the existing cache
+ if ( typeof name === "object" || typeof name === "function" ) {
+ if ( pvt ) {
+ cache[ id ] = jQuery.extend( cache[ id ], name );
+ } else {
+ cache[ id ].data = jQuery.extend( cache[ id ].data, name );
+ }
+ }
+
+ thisCache = cache[ id ];
+
+ // jQuery data() is stored in a separate object inside the object's internal data
+ // cache in order to avoid key collisions between internal data and user-defined
+ // data.
+ if ( !pvt ) {
+ if ( !thisCache.data ) {
+ thisCache.data = {};
+ }
+
+ thisCache = thisCache.data;
+ }
+
+ if ( data !== undefined ) {
+ thisCache[ jQuery.camelCase( name ) ] = data;
+ }
+
+ // Check for both converted-to-camel and non-converted data property names
+ // If a data property was specified
+ if ( getByName ) {
+
+ // First Try to find as-is property data
+ ret = thisCache[ name ];
+
+ // Test for null|undefined property data
+ if ( ret == null ) {
+
+ // Try to find the camelCased property
+ ret = thisCache[ jQuery.camelCase( name ) ];
+ }
+ } else {
+ ret = thisCache;
+ }
+
+ return ret;
+ },
+
+ removeData: function( elem, name, pvt /* Internal Use Only */ ) {
+ if ( !jQuery.acceptData( elem ) ) {
+ return;
+ }
+
+ var thisCache, i, l,
+
+ isNode = elem.nodeType,
+
+ // See jQuery.data for more information
+ cache = isNode ? jQuery.cache : elem,
+ id = isNode ? elem[ jQuery.expando ] : jQuery.expando;
+
+ // If there is already no cache entry for this object, there is no
+ // purpose in continuing
+ if ( !cache[ id ] ) {
+ return;
+ }
+
+ if ( name ) {
+
+ thisCache = pvt ? cache[ id ] : cache[ id ].data;
+
+ if ( thisCache ) {
+
+ // Support array or space separated string names for data keys
+ if ( !jQuery.isArray( name ) ) {
+
+ // try the string as a key before any manipulation
+ if ( name in thisCache ) {
+ name = [ name ];
+ } else {
+
+ // split the camel cased version by spaces unless a key with the spaces exists
+ name = jQuery.camelCase( name );
+ if ( name in thisCache ) {
+ name = [ name ];
+ } else {
+ name = name.split(" ");
+ }
+ }
+ }
+
+ for ( i = 0, l = name.length; i < l; i++ ) {
+ delete thisCache[ name[i] ];
+ }
+
+ // If there is no data left in the cache, we want to continue
+ // and let the cache object itself get destroyed
+ if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) {
+ return;
+ }
+ }
+ }
+
+ // See jQuery.data for more information
+ if ( !pvt ) {
+ delete cache[ id ].data;
+
+ // Don't destroy the parent cache unless the internal data object
+ // had been the only thing left in it
+ if ( !isEmptyDataObject( cache[ id ] ) ) {
+ return;
+ }
+ }
+
+ // Destroy the cache
+ if ( isNode ) {
+ jQuery.cleanData( [ elem ], true );
+
+ // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080)
+ } else if ( jQuery.support.deleteExpando || cache != cache.window ) {
+ delete cache[ id ];
+
+ // When all else fails, null
+ } else {
+ cache[ id ] = null;
+ }
+ },
+
+ // For internal use only.
+ _data: function( elem, name, data ) {
+ return jQuery.data( elem, name, data, true );
+ },
+
+ // A method for determining if a DOM node can handle the data expando
+ acceptData: function( elem ) {
+ var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ];
+
+ // nodes accept data unless otherwise specified; rejection can be conditional
+ return !noData || noData !== true && elem.getAttribute("classid") === noData;
+ }
+});
+
+jQuery.fn.extend({
+ data: function( key, value ) {
+ var parts, part, attr, name, l,
+ elem = this[0],
+ i = 0,
+ data = null;
+
+ // Gets all values
+ if ( key === undefined ) {
+ if ( this.length ) {
+ data = jQuery.data( elem );
+
+ if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) {
+ attr = elem.attributes;
+ for ( l = attr.length; i < l; i++ ) {
+ name = attr[i].name;
+
+ if ( !name.indexOf( "data-" ) ) {
+ name = jQuery.camelCase( name.substring(5) );
+
+ dataAttr( elem, name, data[ name ] );
+ }
+ }
+ jQuery._data( elem, "parsedAttrs", true );
+ }
+ }
+
+ return data;
+ }
+
+ // Sets multiple values
+ if ( typeof key === "object" ) {
+ return this.each(function() {
+ jQuery.data( this, key );
+ });
+ }
+
+ parts = key.split( ".", 2 );
+ parts[1] = parts[1] ? "." + parts[1] : "";
+ part = parts[1] + "!";
+
+ return jQuery.access( this, function( value ) {
+
+ if ( value === undefined ) {
+ data = this.triggerHandler( "getData" + part, [ parts[0] ] );
+
+ // Try to fetch any internally stored data first
+ if ( data === undefined && elem ) {
+ data = jQuery.data( elem, key );
+ data = dataAttr( elem, key, data );
+ }
+
+ return data === undefined && parts[1] ?
+ this.data( parts[0] ) :
+ data;
+ }
+
+ parts[1] = value;
+ this.each(function() {
+ var self = jQuery( this );
+
+ self.triggerHandler( "setData" + part, parts );
+ jQuery.data( this, key, value );
+ self.triggerHandler( "changeData" + part, parts );
+ });
+ }, null, value, arguments.length > 1, null, false );
+ },
+
+ removeData: function( key ) {
+ return this.each(function() {
+ jQuery.removeData( this, key );
+ });
+ }
+});
+
+function dataAttr( elem, key, data ) {
+ // If nothing was found internally, try to fetch any
+ // data from the HTML5 data-* attribute
+ if ( data === undefined && elem.nodeType === 1 ) {
+
+ var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase();
+
+ data = elem.getAttribute( name );
+
+ if ( typeof data === "string" ) {
+ try {
+ data = data === "true" ? true :
+ data === "false" ? false :
+ data === "null" ? null :
+ // Only convert to a number if it doesn't change the string
+ +data + "" === data ? +data :
+ rbrace.test( data ) ? jQuery.parseJSON( data ) :
+ data;
+ } catch( e ) {}
+
+ // Make sure we set the data so it isn't changed later
+ jQuery.data( elem, key, data );
+
+ } else {
+ data = undefined;
+ }
+ }
+
+ return data;
+}
+
+// checks a cache object for emptiness
+function isEmptyDataObject( obj ) {
+ var name;
+ for ( name in obj ) {
+
+ // if the public data object is empty, the private is still empty
+ if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) {
+ continue;
+ }
+ if ( name !== "toJSON" ) {
+ return false;
+ }
+ }
+
+ return true;
+}
+jQuery.extend({
+ queue: function( elem, type, data ) {
+ var queue;
+
+ if ( elem ) {
+ type = ( type || "fx" ) + "queue";
+ queue = jQuery._data( elem, type );
+
+ // Speed up dequeue by getting out quickly if this is just a lookup
+ if ( data ) {
+ if ( !queue || jQuery.isArray(data) ) {
+ queue = jQuery._data( elem, type, jQuery.makeArray(data) );
+ } else {
+ queue.push( data );
+ }
+ }
+ return queue || [];
+ }
+ },
+
+ dequeue: function( elem, type ) {
+ type = type || "fx";
+
+ var queue = jQuery.queue( elem, type ),
+ startLength = queue.length,
+ fn = queue.shift(),
+ hooks = jQuery._queueHooks( elem, type ),
+ next = function() {
+ jQuery.dequeue( elem, type );
+ };
+
+ // If the fx queue is dequeued, always remove the progress sentinel
+ if ( fn === "inprogress" ) {
+ fn = queue.shift();
+ startLength--;
+ }
+
+ if ( fn ) {
+
+ // Add a progress sentinel to prevent the fx queue from being
+ // automatically dequeued
+ if ( type === "fx" ) {
+ queue.unshift( "inprogress" );
+ }
+
+ // clear up the last queue stop function
+ delete hooks.stop;
+ fn.call( elem, next, hooks );
+ }
+
+ if ( !startLength && hooks ) {
+ hooks.empty.fire();
+ }
+ },
+
+ // not intended for public consumption - generates a queueHooks object, or returns the current one
+ _queueHooks: function( elem, type ) {
+ var key = type + "queueHooks";
+ return jQuery._data( elem, key ) || jQuery._data( elem, key, {
+ empty: jQuery.Callbacks("once memory").add(function() {
+ jQuery.removeData( elem, type + "queue", true );
+ jQuery.removeData( elem, key, true );
+ })
+ });
+ }
+});
+
+jQuery.fn.extend({
+ queue: function( type, data ) {
+ var setter = 2;
+
+ if ( typeof type !== "string" ) {
+ data = type;
+ type = "fx";
+ setter--;
+ }
+
+ if ( arguments.length < setter ) {
+ return jQuery.queue( this[0], type );
+ }
+
+ return data === undefined ?
+ this :
+ this.each(function() {
+ var queue = jQuery.queue( this, type, data );
+
+				// ensure hooks exist for this queue
+ jQuery._queueHooks( this, type );
+
+ if ( type === "fx" && queue[0] !== "inprogress" ) {
+ jQuery.dequeue( this, type );
+ }
+ });
+ },
+ dequeue: function( type ) {
+ return this.each(function() {
+ jQuery.dequeue( this, type );
+ });
+ },
+ // Based off of the plugin by Clint Helfers, with permission.
+ // http://blindsignals.com/index.php/2009/07/jquery-delay/
+ delay: function( time, type ) {
+ time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
+ type = type || "fx";
+
+ return this.queue( type, function( next, hooks ) {
+ var timeout = setTimeout( next, time );
+ hooks.stop = function() {
+ clearTimeout( timeout );
+ };
+ });
+ },
+ clearQueue: function( type ) {
+ return this.queue( type || "fx", [] );
+ },
+ // Get a promise resolved when queues of a certain type
+ // are emptied (fx is the type by default)
+ promise: function( type, obj ) {
+ var tmp,
+ count = 1,
+ defer = jQuery.Deferred(),
+ elements = this,
+ i = this.length,
+ resolve = function() {
+ if ( !( --count ) ) {
+ defer.resolveWith( elements, [ elements ] );
+ }
+ };
+
+ if ( typeof type !== "string" ) {
+ obj = type;
+ type = undefined;
+ }
+ type = type || "fx";
+
+ while( i-- ) {
+ tmp = jQuery._data( elements[ i ], type + "queueHooks" );
+ if ( tmp && tmp.empty ) {
+ count++;
+ tmp.empty.add( resolve );
+ }
+ }
+ resolve();
+ return defer.promise( obj );
+ }
+});
+var nodeHook, boolHook, fixSpecified,
+ rclass = /[\t\r\n]/g,
+ rreturn = /\r/g,
+ rtype = /^(?:button|input)$/i,
+ rfocusable = /^(?:button|input|object|select|textarea)$/i,
+ rclickable = /^a(?:rea|)$/i,
+ rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,
+ getSetAttribute = jQuery.support.getSetAttribute;
+
+jQuery.fn.extend({
+ attr: function( name, value ) {
+ return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 );
+ },
+
+ removeAttr: function( name ) {
+ return this.each(function() {
+ jQuery.removeAttr( this, name );
+ });
+ },
+
+ prop: function( name, value ) {
+ return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 );
+ },
+
+ removeProp: function( name ) {
+ name = jQuery.propFix[ name ] || name;
+ return this.each(function() {
+ // try/catch handles cases where IE balks (such as removing a property on window)
+ try {
+ this[ name ] = undefined;
+ delete this[ name ];
+ } catch( e ) {}
+ });
+ },
+
+ addClass: function( value ) {
+ var classNames, i, l, elem,
+ setClass, c, cl;
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function( j ) {
+ jQuery( this ).addClass( value.call(this, j, this.className) );
+ });
+ }
+
+ if ( value && typeof value === "string" ) {
+ classNames = value.split( core_rspace );
+
+ for ( i = 0, l = this.length; i < l; i++ ) {
+ elem = this[ i ];
+
+ if ( elem.nodeType === 1 ) {
+ if ( !elem.className && classNames.length === 1 ) {
+ elem.className = value;
+
+ } else {
+ setClass = " " + elem.className + " ";
+
+ for ( c = 0, cl = classNames.length; c < cl; c++ ) {
+ if ( setClass.indexOf( " " + classNames[ c ] + " " ) < 0 ) {
+ setClass += classNames[ c ] + " ";
+ }
+ }
+ elem.className = jQuery.trim( setClass );
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ removeClass: function( value ) {
+ var removes, className, elem, c, cl, i, l;
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function( j ) {
+ jQuery( this ).removeClass( value.call(this, j, this.className) );
+ });
+ }
+ if ( (value && typeof value === "string") || value === undefined ) {
+ removes = ( value || "" ).split( core_rspace );
+
+ for ( i = 0, l = this.length; i < l; i++ ) {
+ elem = this[ i ];
+ if ( elem.nodeType === 1 && elem.className ) {
+
+ className = (" " + elem.className + " ").replace( rclass, " " );
+
+ // loop over each item in the removal list
+ for ( c = 0, cl = removes.length; c < cl; c++ ) {
+					// Remove until there is nothing left to remove
+ while ( className.indexOf(" " + removes[ c ] + " ") >= 0 ) {
+ className = className.replace( " " + removes[ c ] + " " , " " );
+ }
+ }
+ elem.className = value ? jQuery.trim( className ) : "";
+ }
+ }
+ }
+
+ return this;
+ },
+
+ toggleClass: function( value, stateVal ) {
+ var type = typeof value,
+ isBool = typeof stateVal === "boolean";
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function( i ) {
+ jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal );
+ });
+ }
+
+ return this.each(function() {
+ if ( type === "string" ) {
+ // toggle individual class names
+ var className,
+ i = 0,
+ self = jQuery( this ),
+ state = stateVal,
+ classNames = value.split( core_rspace );
+
+ while ( (className = classNames[ i++ ]) ) {
+ // check each className given, space separated list
+ state = isBool ? state : !self.hasClass( className );
+ self[ state ? "addClass" : "removeClass" ]( className );
+ }
+
+ } else if ( type === "undefined" || type === "boolean" ) {
+ if ( this.className ) {
+ // store className if set
+ jQuery._data( this, "__className__", this.className );
+ }
+
+ // toggle whole className
+ this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || "";
+ }
+ });
+ },
+
+ hasClass: function( selector ) {
+ var className = " " + selector + " ",
+ i = 0,
+ l = this.length;
+ for ( ; i < l; i++ ) {
+ if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) {
+ return true;
+ }
+ }
+
+ return false;
+ },
+
+ val: function( value ) {
+ var hooks, ret, isFunction,
+ elem = this[0];
+
+ if ( !arguments.length ) {
+ if ( elem ) {
+ hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ];
+
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) {
+ return ret;
+ }
+
+ ret = elem.value;
+
+ return typeof ret === "string" ?
+ // handle most common string cases
+ ret.replace(rreturn, "") :
+ // handle cases where value is null/undef or number
+ ret == null ? "" : ret;
+ }
+
+ return;
+ }
+
+ isFunction = jQuery.isFunction( value );
+
+ return this.each(function( i ) {
+ var val,
+ self = jQuery(this);
+
+ if ( this.nodeType !== 1 ) {
+ return;
+ }
+
+ if ( isFunction ) {
+ val = value.call( this, i, self.val() );
+ } else {
+ val = value;
+ }
+
+ // Treat null/undefined as ""; convert numbers to string
+ if ( val == null ) {
+ val = "";
+ } else if ( typeof val === "number" ) {
+ val += "";
+ } else if ( jQuery.isArray( val ) ) {
+ val = jQuery.map(val, function ( value ) {
+ return value == null ? "" : value + "";
+ });
+ }
+
+ hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
+
+ // If set returns undefined, fall back to normal setting
+ if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) {
+ this.value = val;
+ }
+ });
+ }
+});
+
+jQuery.extend({
+ valHooks: {
+ option: {
+ get: function( elem ) {
+ // attributes.value is undefined in Blackberry 4.7 but
+ // uses .value. See #6932
+ var val = elem.attributes.value;
+ return !val || val.specified ? elem.value : elem.text;
+ }
+ },
+ select: {
+ get: function( elem ) {
+ var value, option,
+ options = elem.options,
+ index = elem.selectedIndex,
+ one = elem.type === "select-one" || index < 0,
+ values = one ? null : [],
+ max = one ? index + 1 : options.length,
+ i = index < 0 ?
+ max :
+ one ? index : 0;
+
+ // Loop through all the selected options
+ for ( ; i < max; i++ ) {
+ option = options[ i ];
+
+ // oldIE doesn't update selected after form reset (#2551)
+ if ( ( option.selected || i === index ) &&
+ // Don't return options that are disabled or in a disabled optgroup
+ ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) &&
+ ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) {
+
+ // Get the specific value for the option
+ value = jQuery( option ).val();
+
+						// We don't need an array for single selects
+ if ( one ) {
+ return value;
+ }
+
+ // Multi-Selects return an array
+ values.push( value );
+ }
+ }
+
+ return values;
+ },
+
+ set: function( elem, value ) {
+ var values = jQuery.makeArray( value );
+
+ jQuery(elem).find("option").each(function() {
+ this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0;
+ });
+
+ if ( !values.length ) {
+ elem.selectedIndex = -1;
+ }
+ return values;
+ }
+ }
+ },
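+	// With the hooks above (illustrative): .val() on a <select multiple> returns an
+	// array of the selected option values, and .val([ "a", "c" ]) selects exactly those.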
+
+ // Unused in 1.8, left in so attrFn-stabbers won't die; remove in 1.9
+ attrFn: {},
+
+ attr: function( elem, name, value, pass ) {
+ var ret, hooks, notxml,
+ nType = elem.nodeType;
+
+ // don't get/set attributes on text, comment and attribute nodes
+ if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
+ return;
+ }
+
+ if ( pass && jQuery.isFunction( jQuery.fn[ name ] ) ) {
+ return jQuery( elem )[ name ]( value );
+ }
+
+ // Fallback to prop when attributes are not supported
+ if ( typeof elem.getAttribute === "undefined" ) {
+ return jQuery.prop( elem, name, value );
+ }
+
+ notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
+
+ // All attributes are lowercase
+ // Grab necessary hook if one is defined
+ if ( notxml ) {
+ name = name.toLowerCase();
+ hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook );
+ }
+
+ if ( value !== undefined ) {
+
+ if ( value === null ) {
+ jQuery.removeAttr( elem, name );
+ return;
+
+ } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) {
+ return ret;
+
+ } else {
+ elem.setAttribute( name, value + "" );
+ return value;
+ }
+
+ } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) {
+ return ret;
+
+ } else {
+
+ ret = elem.getAttribute( name );
+
+ // Non-existent attributes return null, we normalize to undefined
+ return ret === null ?
+ undefined :
+ ret;
+ }
+ },
+
+ removeAttr: function( elem, value ) {
+ var propName, attrNames, name, isBool,
+ i = 0;
+
+ if ( value && elem.nodeType === 1 ) {
+
+ attrNames = value.split( core_rspace );
+
+ for ( ; i < attrNames.length; i++ ) {
+ name = attrNames[ i ];
+
+ if ( name ) {
+ propName = jQuery.propFix[ name ] || name;
+ isBool = rboolean.test( name );
+
+ // See #9699 for explanation of this approach (setting first, then removal)
+ // Do not do this for boolean attributes (see #10870)
+ if ( !isBool ) {
+ jQuery.attr( elem, name, "" );
+ }
+ elem.removeAttribute( getSetAttribute ? name : propName );
+
+ // Set corresponding property to false for boolean attributes
+ if ( isBool && propName in elem ) {
+ elem[ propName ] = false;
+ }
+ }
+ }
+ }
+ },
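+	// Illustrative: jQuery( elem ).removeAttr( "checked disabled" ) walks the
+	// space-separated list above, removing each attribute and forcing the
+	// corresponding boolean property (e.g. elem.checked) to false.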
+
+ attrHooks: {
+ type: {
+ set: function( elem, value ) {
+ // We can't allow the type property to be changed (since it causes problems in IE)
+ if ( rtype.test( elem.nodeName ) && elem.parentNode ) {
+ jQuery.error( "type property can't be changed" );
+ } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) {
+ // Setting the type on a radio button after the value resets the value in IE6-9
+				// Reset value to its default in case type is set after value
+ // This is for element creation
+ var val = elem.value;
+ elem.setAttribute( "type", value );
+ if ( val ) {
+ elem.value = val;
+ }
+ return value;
+ }
+ }
+ },
+ // Use the value property for back compat
+ // Use the nodeHook for button elements in IE6/7 (#1954)
+ value: {
+ get: function( elem, name ) {
+ if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
+ return nodeHook.get( elem, name );
+ }
+ return name in elem ?
+ elem.value :
+ null;
+ },
+ set: function( elem, value, name ) {
+ if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
+ return nodeHook.set( elem, value, name );
+ }
+ // Does not return so that setAttribute is also used
+ elem.value = value;
+ }
+ }
+ },
+
+ propFix: {
+ tabindex: "tabIndex",
+ readonly: "readOnly",
+ "for": "htmlFor",
+ "class": "className",
+ maxlength: "maxLength",
+ cellspacing: "cellSpacing",
+ cellpadding: "cellPadding",
+ rowspan: "rowSpan",
+ colspan: "colSpan",
+ usemap: "useMap",
+ frameborder: "frameBorder",
+ contenteditable: "contentEditable"
+ },
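+	// propFix maps attribute-style names to DOM property names, so (illustrative)
+	// jQuery( label ).prop( "for" ) reads label.htmlFor.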
+
+ prop: function( elem, name, value ) {
+ var ret, hooks, notxml,
+ nType = elem.nodeType;
+
+ // don't get/set properties on text, comment and attribute nodes
+ if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
+ return;
+ }
+
+ notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
+
+ if ( notxml ) {
+ // Fix name and attach hooks
+ name = jQuery.propFix[ name ] || name;
+ hooks = jQuery.propHooks[ name ];
+ }
+
+ if ( value !== undefined ) {
+ if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) {
+ return ret;
+
+ } else {
+ return ( elem[ name ] = value );
+ }
+
+ } else {
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) {
+ return ret;
+
+ } else {
+ return elem[ name ];
+ }
+ }
+ },
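+	// Illustrative attr vs. prop on a hypothetical checkbox:
+	//   jQuery( box ).attr( "checked" )  // "checked" or undefined (markup attribute)
+	//   jQuery( box ).prop( "checked" )  // true or false (live DOM property)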
+
+ propHooks: {
+ tabIndex: {
+ get: function( elem ) {
+ // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set
+ // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
+ var attributeNode = elem.getAttributeNode("tabindex");
+
+ return attributeNode && attributeNode.specified ?
+ parseInt( attributeNode.value, 10 ) :
+ rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ?
+ 0 :
+ undefined;
+ }
+ }
+ }
+});
+
+// Hook for boolean attributes
+boolHook = {
+ get: function( elem, name ) {
+ // Align boolean attributes with corresponding properties
+ // Fall back to attribute presence where some booleans are not supported
+ var attrNode,
+ property = jQuery.prop( elem, name );
+ return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ?
+ name.toLowerCase() :
+ undefined;
+ },
+ set: function( elem, value, name ) {
+ var propName;
+ if ( value === false ) {
+ // Remove boolean attributes when set to false
+ jQuery.removeAttr( elem, name );
+ } else {
+ // value is true since we know at this point it's type boolean and not false
+ // Set boolean attributes to the same name and set the DOM property
+ propName = jQuery.propFix[ name ] || name;
+ if ( propName in elem ) {
+ // Only set the IDL specifically if it already exists on the element
+ elem[ propName ] = true;
+ }
+
+ elem.setAttribute( name, name.toLowerCase() );
+ }
+ return name;
+ }
+};
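+// Consequently (illustrative): .attr( "checked", false ) removes the attribute
+// outright, while a truthy value sets checked="checked" and the matching property.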
+
+// IE6/7 do not support getting/setting some attributes with get/setAttribute
+if ( !getSetAttribute ) {
+
+ fixSpecified = {
+ name: true,
+ id: true,
+ coords: true
+ };
+
+ // Use this for any attribute in IE6/7
+ // This fixes almost every IE6/7 issue
+ nodeHook = jQuery.valHooks.button = {
+ get: function( elem, name ) {
+ var ret;
+ ret = elem.getAttributeNode( name );
+ return ret && ( fixSpecified[ name ] ? ret.value !== "" : ret.specified ) ?
+ ret.value :
+ undefined;
+ },
+ set: function( elem, value, name ) {
+ // Set the existing or create a new attribute node
+ var ret = elem.getAttributeNode( name );
+ if ( !ret ) {
+ ret = document.createAttribute( name );
+ elem.setAttributeNode( ret );
+ }
+ return ( ret.value = value + "" );
+ }
+ };
+
+	// Set width and height to auto instead of 0 on empty string (Bug #8150)
+ // This is for removals
+ jQuery.each([ "width", "height" ], function( i, name ) {
+ jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
+ set: function( elem, value ) {
+ if ( value === "" ) {
+ elem.setAttribute( name, "auto" );
+ return value;
+ }
+ }
+ });
+ });
+
+	// Set contenteditable to false on removals (#10429)
+ // Setting to empty string throws an error as an invalid value
+ jQuery.attrHooks.contenteditable = {
+ get: nodeHook.get,
+ set: function( elem, value, name ) {
+ if ( value === "" ) {
+ value = "false";
+ }
+ nodeHook.set( elem, value, name );
+ }
+ };
+}
+
+
+// Some attributes require a special call on IE
+if ( !jQuery.support.hrefNormalized ) {
+ jQuery.each([ "href", "src", "width", "height" ], function( i, name ) {
+ jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
+ get: function( elem ) {
+ var ret = elem.getAttribute( name, 2 );
+ return ret === null ? undefined : ret;
+ }
+ });
+ });
+}
+
+if ( !jQuery.support.style ) {
+ jQuery.attrHooks.style = {
+ get: function( elem ) {
+ // Return undefined in the case of empty string
+ // Normalize to lowercase since IE uppercases css property names
+ return elem.style.cssText.toLowerCase() || undefined;
+ },
+ set: function( elem, value ) {
+ return ( elem.style.cssText = value + "" );
+ }
+ };
+}
+
+// Safari mis-reports the default selected property of an option
+// Accessing the parent's selectedIndex property fixes it
+if ( !jQuery.support.optSelected ) {
+ jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, {
+ get: function( elem ) {
+ var parent = elem.parentNode;
+
+ if ( parent ) {
+ parent.selectedIndex;
+
+ // Make sure that it also works with optgroups, see #5701
+ if ( parent.parentNode ) {
+ parent.parentNode.selectedIndex;
+ }
+ }
+ return null;
+ }
+ });
+}
+
+// IE6/7 call the enctype property "encoding"
+if ( !jQuery.support.enctype ) {
+ jQuery.propFix.enctype = "encoding";
+}
+
+// Radios and checkboxes getter/setter
+if ( !jQuery.support.checkOn ) {
+ jQuery.each([ "radio", "checkbox" ], function() {
+ jQuery.valHooks[ this ] = {
+ get: function( elem ) {
+ // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified
+ return elem.getAttribute("value") === null ? "on" : elem.value;
+ }
+ };
+ });
+}
+jQuery.each([ "radio", "checkbox" ], function() {
+ jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], {
+ set: function( elem, value ) {
+ if ( jQuery.isArray( value ) ) {
+ return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 );
+ }
+ }
+ });
+});
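+// Illustrative: jQuery("input[type=checkbox]").val([ "a", "b" ]) checks each box
+// whose value appears in the array (hypothetical values).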
+var rformElems = /^(?:textarea|input|select)$/i,
+ rtypenamespace = /^([^\.]*|)(?:\.(.+)|)$/,
+ rhoverHack = /(?:^|\s)hover(\.\S+|)\b/,
+ rkeyEvent = /^key/,
+ rmouseEvent = /^(?:mouse|contextmenu)|click/,
+ rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
+ hoverHack = function( events ) {
+ return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" );
+ };
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
+ */
+jQuery.event = {
+
+ add: function( elem, types, handler, data, selector ) {
+
+ var elemData, eventHandle, events,
+ t, tns, type, namespaces, handleObj,
+ handleObjIn, handlers, special;
+
+		// Don't attach events to noData or text/comment nodes (plain objects are allowed, though)
+ if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) {
+ return;
+ }
+
+ // Caller can pass in an object of custom data in lieu of the handler
+ if ( handler.handler ) {
+ handleObjIn = handler;
+ handler = handleObjIn.handler;
+ selector = handleObjIn.selector;
+ }
+
+ // Make sure that the handler has a unique ID, used to find/remove it later
+ if ( !handler.guid ) {
+ handler.guid = jQuery.guid++;
+ }
+
+ // Init the element's event structure and main handler, if this is the first
+ events = elemData.events;
+ if ( !events ) {
+ elemData.events = events = {};
+ }
+ eventHandle = elemData.handle;
+ if ( !eventHandle ) {
+ elemData.handle = eventHandle = function( e ) {
+ // Discard the second event of a jQuery.event.trigger() and
+ // when an event is called after a page has unloaded
+ return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ?
+ jQuery.event.dispatch.apply( eventHandle.elem, arguments ) :
+ undefined;
+ };
+ // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events
+ eventHandle.elem = elem;
+ }
+
+ // Handle multiple events separated by a space
+ // jQuery(...).bind("mouseover mouseout", fn);
+ types = jQuery.trim( hoverHack(types) ).split( " " );
+ for ( t = 0; t < types.length; t++ ) {
+
+ tns = rtypenamespace.exec( types[t] ) || [];
+ type = tns[1];
+ namespaces = ( tns[2] || "" ).split( "." ).sort();
+
+ // If event changes its type, use the special event handlers for the changed type
+ special = jQuery.event.special[ type ] || {};
+
+ // If selector defined, determine special event api type, otherwise given type
+ type = ( selector ? special.delegateType : special.bindType ) || type;
+
+ // Update special based on newly reset type
+ special = jQuery.event.special[ type ] || {};
+
+ // handleObj is passed to all event handlers
+ handleObj = jQuery.extend({
+ type: type,
+ origType: tns[1],
+ data: data,
+ handler: handler,
+ guid: handler.guid,
+ selector: selector,
+ needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
+ namespace: namespaces.join(".")
+ }, handleObjIn );
+
+ // Init the event handler queue if we're the first
+ handlers = events[ type ];
+ if ( !handlers ) {
+ handlers = events[ type ] = [];
+ handlers.delegateCount = 0;
+
+ // Only use addEventListener/attachEvent if the special events handler returns false
+ if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
+ // Bind the global event handler to the element
+ if ( elem.addEventListener ) {
+ elem.addEventListener( type, eventHandle, false );
+
+ } else if ( elem.attachEvent ) {
+ elem.attachEvent( "on" + type, eventHandle );
+ }
+ }
+ }
+
+ if ( special.add ) {
+ special.add.call( elem, handleObj );
+
+ if ( !handleObj.handler.guid ) {
+ handleObj.handler.guid = handler.guid;
+ }
+ }
+
+ // Add to the element's handler list, delegates in front
+ if ( selector ) {
+ handlers.splice( handlers.delegateCount++, 0, handleObj );
+ } else {
+ handlers.push( handleObj );
+ }
+
+ // Keep track of which events have ever been used, for event optimization
+ jQuery.event.global[ type ] = true;
+ }
+
+ // Nullify elem to prevent memory leaks in IE
+ elem = null;
+ },
+
+ global: {},
+
+ // Detach an event or set of events from an element
+ remove: function( elem, types, handler, selector, mappedTypes ) {
+
+ var t, tns, type, origType, namespaces, origCount,
+ j, events, special, eventType, handleObj,
+ elemData = jQuery.hasData( elem ) && jQuery._data( elem );
+
+ if ( !elemData || !(events = elemData.events) ) {
+ return;
+ }
+
+ // Once for each type.namespace in types; type may be omitted
+ types = jQuery.trim( hoverHack( types || "" ) ).split(" ");
+ for ( t = 0; t < types.length; t++ ) {
+ tns = rtypenamespace.exec( types[t] ) || [];
+ type = origType = tns[1];
+ namespaces = tns[2];
+
+ // Unbind all events (on this namespace, if provided) for the element
+ if ( !type ) {
+ for ( type in events ) {
+ jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
+ }
+ continue;
+ }
+
+ special = jQuery.event.special[ type ] || {};
+ type = ( selector? special.delegateType : special.bindType ) || type;
+ eventType = events[ type ] || [];
+ origCount = eventType.length;
+ namespaces = namespaces ? new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.|)") + "(\\.|$)") : null;
+
+ // Remove matching events
+ for ( j = 0; j < eventType.length; j++ ) {
+ handleObj = eventType[ j ];
+
+ if ( ( mappedTypes || origType === handleObj.origType ) &&
+ ( !handler || handler.guid === handleObj.guid ) &&
+ ( !namespaces || namespaces.test( handleObj.namespace ) ) &&
+ ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) {
+ eventType.splice( j--, 1 );
+
+ if ( handleObj.selector ) {
+ eventType.delegateCount--;
+ }
+ if ( special.remove ) {
+ special.remove.call( elem, handleObj );
+ }
+ }
+ }
+
+ // Remove generic event handler if we removed something and no more handlers exist
+ // (avoids potential for endless recursion during removal of special event handlers)
+ if ( eventType.length === 0 && origCount !== eventType.length ) {
+ if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
+ jQuery.removeEvent( elem, type, elemData.handle );
+ }
+
+ delete events[ type ];
+ }
+ }
+
+ // Remove the expando if it's no longer used
+ if ( jQuery.isEmptyObject( events ) ) {
+ delete elemData.handle;
+
+ // removeData also checks for emptiness and clears the expando if empty
+ // so use it instead of delete
+ jQuery.removeData( elem, "events", true );
+ }
+ },
+
+ // Events that are safe to short-circuit if no handlers are attached.
+ // Native DOM events should not be added, they may have inline handlers.
+ customEvent: {
+ "getData": true,
+ "setData": true,
+ "changeData": true
+ },
+
+ trigger: function( event, data, elem, onlyHandlers ) {
+ // Don't do events on text and comment nodes
+ if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) {
+ return;
+ }
+
+ // Event object or event type
+ var cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType,
+ type = event.type || event,
+ namespaces = [];
+
+ // focus/blur morphs to focusin/out; ensure we're not firing them right now
+ if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
+ return;
+ }
+
+ if ( type.indexOf( "!" ) >= 0 ) {
+ // Exclusive events trigger only for the exact event (no namespaces)
+ type = type.slice(0, -1);
+ exclusive = true;
+ }
+
+ if ( type.indexOf( "." ) >= 0 ) {
+ // Namespaced trigger; create a regexp to match event type in handle()
+ namespaces = type.split(".");
+ type = namespaces.shift();
+ namespaces.sort();
+ }
+
+ if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) {
+ // No jQuery handlers for this event type, and it can't have inline handlers
+ return;
+ }
+
+ // Caller can pass in an Event, Object, or just an event type string
+ event = typeof event === "object" ?
+ // jQuery.Event object
+ event[ jQuery.expando ] ? event :
+ // Object literal
+ new jQuery.Event( type, event ) :
+ // Just the event type (string)
+ new jQuery.Event( type );
+
+ event.type = type;
+ event.isTrigger = true;
+ event.exclusive = exclusive;
+ event.namespace = namespaces.join( "." );
+ event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)") : null;
+ ontype = type.indexOf( ":" ) < 0 ? "on" + type : "";
+
+ // Handle a global trigger
+ if ( !elem ) {
+
+ // TODO: Stop taunting the data cache; remove global events and always attach to document
+ cache = jQuery.cache;
+ for ( i in cache ) {
+ if ( cache[ i ].events && cache[ i ].events[ type ] ) {
+ jQuery.event.trigger( event, data, cache[ i ].handle.elem, true );
+ }
+ }
+ return;
+ }
+
+ // Clean up the event in case it is being reused
+ event.result = undefined;
+ if ( !event.target ) {
+ event.target = elem;
+ }
+
+ // Clone any incoming data and prepend the event, creating the handler arg list
+ data = data != null ? jQuery.makeArray( data ) : [];
+ data.unshift( event );
+
+ // Allow special events to draw outside the lines
+ special = jQuery.event.special[ type ] || {};
+ if ( special.trigger && special.trigger.apply( elem, data ) === false ) {
+ return;
+ }
+
+ // Determine event propagation path in advance, per W3C events spec (#9951)
+ // Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
+ eventPath = [[ elem, special.bindType || type ]];
+ if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) {
+
+ bubbleType = special.delegateType || type;
+ cur = rfocusMorph.test( bubbleType + type ) ? elem : elem.parentNode;
+ for ( old = elem; cur; cur = cur.parentNode ) {
+ eventPath.push([ cur, bubbleType ]);
+ old = cur;
+ }
+
+ // Only add window if we got to document (e.g., not plain obj or detached DOM)
+ if ( old === (elem.ownerDocument || document) ) {
+ eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]);
+ }
+ }
+
+ // Fire handlers on the event path
+ for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) {
+
+ cur = eventPath[i][0];
+ event.type = eventPath[i][1];
+
+ handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" );
+ if ( handle ) {
+ handle.apply( cur, data );
+ }
+ // Note that this is a bare JS function and not a jQuery handler
+ handle = ontype && cur[ ontype ];
+ if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) {
+ event.preventDefault();
+ }
+ }
+ event.type = type;
+
+ // If nobody prevented the default action, do it now
+ if ( !onlyHandlers && !event.isDefaultPrevented() ) {
+
+ if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) &&
+ !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) {
+
+			// Call a native DOM method on the target with the same name as the event.
+ // Can't use an .isFunction() check here because IE6/7 fails that test.
+			// Don't do default actions on window; that's where global variables live (#6170)
+ // IE<9 dies on focus/blur to hidden element (#1486)
+ if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) {
+
+ // Don't re-trigger an onFOO event when we call its FOO() method
+ old = elem[ ontype ];
+
+ if ( old ) {
+ elem[ ontype ] = null;
+ }
+
+ // Prevent re-triggering of the same event, since we already bubbled it above
+ jQuery.event.triggered = type;
+ elem[ type ]();
+ jQuery.event.triggered = undefined;
+
+ if ( old ) {
+ elem[ ontype ] = old;
+ }
+ }
+ }
+ }
+
+ return event.result;
+ },
+
+ dispatch: function( event ) {
+
+ // Make a writable jQuery.Event from the native event object
+ event = jQuery.event.fix( event || window.event );
+
+ var i, j, cur, ret, selMatch, matched, matches, handleObj, sel, related,
+ handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []),
+ delegateCount = handlers.delegateCount,
+ args = core_slice.call( arguments ),
+ run_all = !event.exclusive && !event.namespace,
+ special = jQuery.event.special[ event.type ] || {},
+ handlerQueue = [];
+
+ // Use the fix-ed jQuery.Event rather than the (read-only) native event
+ args[0] = event;
+ event.delegateTarget = this;
+
+ // Call the preDispatch hook for the mapped type, and let it bail if desired
+ if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
+ return;
+ }
+
+ // Determine handlers that should run if there are delegated events
+ // Avoid non-left-click bubbling in Firefox (#3861)
+ if ( delegateCount && !(event.button && event.type === "click") ) {
+
+ for ( cur = event.target; cur != this; cur = cur.parentNode || this ) {
+
+ // Don't process clicks (ONLY) on disabled elements (#6911, #8165, #11382, #11764)
+ if ( cur.disabled !== true || event.type !== "click" ) {
+ selMatch = {};
+ matches = [];
+ for ( i = 0; i < delegateCount; i++ ) {
+ handleObj = handlers[ i ];
+ sel = handleObj.selector;
+
+ if ( selMatch[ sel ] === undefined ) {
+ selMatch[ sel ] = handleObj.needsContext ?
+ jQuery( sel, this ).index( cur ) >= 0 :
+ jQuery.find( sel, this, null, [ cur ] ).length;
+ }
+ if ( selMatch[ sel ] ) {
+ matches.push( handleObj );
+ }
+ }
+ if ( matches.length ) {
+ handlerQueue.push({ elem: cur, matches: matches });
+ }
+ }
+ }
+ }
+
+ // Add the remaining (directly-bound) handlers
+ if ( handlers.length > delegateCount ) {
+ handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) });
+ }
+
+ // Run delegates first; they may want to stop propagation beneath us
+ for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) {
+ matched = handlerQueue[ i ];
+ event.currentTarget = matched.elem;
+
+ for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) {
+ handleObj = matched.matches[ j ];
+
+ // Triggered event must either 1) be non-exclusive and have no namespace, or
+ // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace).
+ if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) {
+
+ event.data = handleObj.data;
+ event.handleObj = handleObj;
+
+ ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler )
+ .apply( matched.elem, args );
+
+ if ( ret !== undefined ) {
+ event.result = ret;
+ if ( ret === false ) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+ }
+ }
+ }
+ }
+
+ // Call the postDispatch hook for the mapped type
+ if ( special.postDispatch ) {
+ special.postDispatch.call( this, event );
+ }
+
+ return event.result;
+ },
+
+ // Includes some event props shared by KeyEvent and MouseEvent
+ // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 ***
+ props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),
+
+ fixHooks: {},
+
+ keyHooks: {
+ props: "char charCode key keyCode".split(" "),
+ filter: function( event, original ) {
+
+ // Add which for key events
+ if ( event.which == null ) {
+ event.which = original.charCode != null ? original.charCode : original.keyCode;
+ }
+
+ return event;
+ }
+ },
+
+ mouseHooks: {
+ props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),
+ filter: function( event, original ) {
+ var eventDoc, doc, body,
+ button = original.button,
+ fromElement = original.fromElement;
+
+ // Calculate pageX/Y if missing and clientX/Y available
+ if ( event.pageX == null && original.clientX != null ) {
+ eventDoc = event.target.ownerDocument || document;
+ doc = eventDoc.documentElement;
+ body = eventDoc.body;
+
+ event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 );
+ event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 );
+ }
+
+ // Add relatedTarget, if necessary
+ if ( !event.relatedTarget && fromElement ) {
+ event.relatedTarget = fromElement === event.target ? original.toElement : fromElement;
+ }
+
+ // Add which for click: 1 === left; 2 === middle; 3 === right
+ // Note: button is not normalized, so don't use it
+ if ( !event.which && button !== undefined ) {
+ event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) );
+ }
+
+ return event;
+ }
+ },
+
+ fix: function( event ) {
+ if ( event[ jQuery.expando ] ) {
+ return event;
+ }
+
+ // Create a writable copy of the event object and normalize some properties
+ var i, prop,
+ originalEvent = event,
+ fixHook = jQuery.event.fixHooks[ event.type ] || {},
+ copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props;
+
+ event = jQuery.Event( originalEvent );
+
+ for ( i = copy.length; i; ) {
+ prop = copy[ --i ];
+ event[ prop ] = originalEvent[ prop ];
+ }
+
+ // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2)
+ if ( !event.target ) {
+ event.target = originalEvent.srcElement || document;
+ }
+
+ // Target should not be a text node (#504, Safari)
+ if ( event.target.nodeType === 3 ) {
+ event.target = event.target.parentNode;
+ }
+
+ // For mouse/key events, metaKey==false if it's undefined (#3368, #11328; IE6/7/8)
+ event.metaKey = !!event.metaKey;
+
+ return fixHook.filter? fixHook.filter( event, originalEvent ) : event;
+ },
+
+ special: {
+ load: {
+ // Prevent triggered image.load events from bubbling to window.load
+ noBubble: true
+ },
+
+ focus: {
+ delegateType: "focusin"
+ },
+ blur: {
+ delegateType: "focusout"
+ },
+
+ beforeunload: {
+ setup: function( data, namespaces, eventHandle ) {
+				// We only want to do this special case on window objects
+ if ( jQuery.isWindow( this ) ) {
+ this.onbeforeunload = eventHandle;
+ }
+ },
+
+ teardown: function( namespaces, eventHandle ) {
+ if ( this.onbeforeunload === eventHandle ) {
+ this.onbeforeunload = null;
+ }
+ }
+ }
+ },
+
+ simulate: function( type, elem, event, bubble ) {
+ // Piggyback on a donor event to simulate a different one.
+ // Fake originalEvent to avoid donor's stopPropagation, but if the
+ // simulated event prevents default then we do the same on the donor.
+ var e = jQuery.extend(
+ new jQuery.Event(),
+ event,
+ { type: type,
+ isSimulated: true,
+ originalEvent: {}
+ }
+ );
+ if ( bubble ) {
+ jQuery.event.trigger( e, null, elem );
+ } else {
+ jQuery.event.dispatch.call( elem, e );
+ }
+ if ( e.isDefaultPrevented() ) {
+ event.preventDefault();
+ }
+ }
+};
+
+// Some plugins still use this, but it's undocumented/deprecated and will be removed.
+// The 1.7 special event interface should provide all the hooks needed now.
+jQuery.event.handle = jQuery.event.dispatch;
+
+jQuery.removeEvent = document.removeEventListener ?
+ function( elem, type, handle ) {
+ if ( elem.removeEventListener ) {
+ elem.removeEventListener( type, handle, false );
+ }
+ } :
+ function( elem, type, handle ) {
+ var name = "on" + type;
+
+ if ( elem.detachEvent ) {
+
+ // #8545, #7054, preventing memory leaks for custom events in IE6-8
+ // detachEvent needed property on element, by name of that event, to properly expose it to GC
+ if ( typeof elem[ name ] === "undefined" ) {
+ elem[ name ] = null;
+ }
+
+ elem.detachEvent( name, handle );
+ }
+ };
+
+jQuery.Event = function( src, props ) {
+ // Allow instantiation without the 'new' keyword
+ if ( !(this instanceof jQuery.Event) ) {
+ return new jQuery.Event( src, props );
+ }
+
+ // Event object
+ if ( src && src.type ) {
+ this.originalEvent = src;
+ this.type = src.type;
+
+ // Events bubbling up the document may have been marked as prevented
+ // by a handler lower down the tree; reflect the correct value.
+ this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false ||
+ src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse;
+
+ // Event type
+ } else {
+ this.type = src;
+ }
+
+ // Put explicitly provided properties onto the event object
+ if ( props ) {
+ jQuery.extend( this, props );
+ }
+
+ // Create a timestamp if incoming event doesn't have one
+ this.timeStamp = src && src.timeStamp || jQuery.now();
+
+ // Mark it as fixed
+ this[ jQuery.expando ] = true;
+};
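+// Illustrative: var e = jQuery.Event( "keydown", { which: 13 } ); passing the
+// object to .trigger() lets handlers see e.which, and e.isDefaultPrevented()
+// can be checked afterwards (hypothetical property values).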
+
+function returnFalse() {
+ return false;
+}
+function returnTrue() {
+ return true;
+}
+
+// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
+// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
+jQuery.Event.prototype = {
+ preventDefault: function() {
+ this.isDefaultPrevented = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+
+ // if preventDefault exists run it on the original event
+ if ( e.preventDefault ) {
+ e.preventDefault();
+
+ // otherwise set the returnValue property of the original event to false (IE)
+ } else {
+ e.returnValue = false;
+ }
+ },
+ stopPropagation: function() {
+ this.isPropagationStopped = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+ // if stopPropagation exists run it on the original event
+ if ( e.stopPropagation ) {
+ e.stopPropagation();
+ }
+ // otherwise set the cancelBubble property of the original event to true (IE)
+ e.cancelBubble = true;
+ },
+ stopImmediatePropagation: function() {
+ this.isImmediatePropagationStopped = returnTrue;
+ this.stopPropagation();
+ },
+ isDefaultPrevented: returnFalse,
+ isPropagationStopped: returnFalse,
+ isImmediatePropagationStopped: returnFalse
+};
+
+// Create mouseenter/leave events using mouseover/out and event-time checks
+jQuery.each({
+ mouseenter: "mouseover",
+ mouseleave: "mouseout"
+}, function( orig, fix ) {
+ jQuery.event.special[ orig ] = {
+ delegateType: fix,
+ bindType: fix,
+
+ handle: function( event ) {
+ var ret,
+ target = this,
+ related = event.relatedTarget,
+ handleObj = event.handleObj,
+ selector = handleObj.selector;
+
+			// For mouseenter/leave, call the handler if related is outside the target.
+ // NB: No relatedTarget if the mouse left/entered the browser window
+ if ( !related || (related !== target && !jQuery.contains( target, related )) ) {
+ event.type = handleObj.origType;
+ ret = handleObj.handler.apply( this, arguments );
+ event.type = fix;
+ }
+ return ret;
+ }
+ };
+});
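+// Illustrative delegated use (hypothetical markup): jQuery("#list").on( "mouseenter",
+// "li", fn ) binds the underlying "mouseover" type and fires fn only when the
+// pointer enters an <li> from outside it.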
+
+// IE submit delegation
+if ( !jQuery.support.submitBubbles ) {
+
+ jQuery.event.special.submit = {
+ setup: function() {
+ // Only need this for delegated form submit events
+ if ( jQuery.nodeName( this, "form" ) ) {
+ return false;
+ }
+
+ // Lazy-add a submit handler when a descendant form may potentially be submitted
+ jQuery.event.add( this, "click._submit keypress._submit", function( e ) {
+ // Node name check avoids a VML-related crash in IE (#9807)
+ var elem = e.target,
+ form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined;
+ if ( form && !jQuery._data( form, "_submit_attached" ) ) {
+ jQuery.event.add( form, "submit._submit", function( event ) {
+ event._submit_bubble = true;
+ });
+ jQuery._data( form, "_submit_attached", true );
+ }
+ });
+ // return undefined since we don't need an event listener
+ },
+
+ postDispatch: function( event ) {
+ // If form was submitted by the user, bubble the event up the tree
+ if ( event._submit_bubble ) {
+ delete event._submit_bubble;
+ if ( this.parentNode && !event.isTrigger ) {
+ jQuery.event.simulate( "submit", this.parentNode, event, true );
+ }
+ }
+ },
+
+ teardown: function() {
+ // Only need this for delegated form submit events
+ if ( jQuery.nodeName( this, "form" ) ) {
+ return false;
+ }
+
+ // Remove delegated handlers; cleanData eventually reaps submit handlers attached above
+ jQuery.event.remove( this, "._submit" );
+ }
+ };
+}
+
+// IE change delegation and checkbox/radio fix
+if ( !jQuery.support.changeBubbles ) {
+
+ jQuery.event.special.change = {
+
+ setup: function() {
+
+ if ( rformElems.test( this.nodeName ) ) {
+ // IE doesn't fire change on a check/radio until blur; trigger it on click
+ // after a propertychange. Eat the blur-change in special.change.handle.
+ // This still fires onchange a second time for check/radio after blur.
+ if ( this.type === "checkbox" || this.type === "radio" ) {
+ jQuery.event.add( this, "propertychange._change", function( event ) {
+ if ( event.originalEvent.propertyName === "checked" ) {
+ this._just_changed = true;
+ }
+ });
+ jQuery.event.add( this, "click._change", function( event ) {
+ if ( this._just_changed && !event.isTrigger ) {
+ this._just_changed = false;
+ }
+ // Allow triggered, simulated change events (#11500)
+ jQuery.event.simulate( "change", this, event, true );
+ });
+ }
+ return false;
+ }
+ // Delegated event; lazy-add a change handler on descendant inputs
+ jQuery.event.add( this, "beforeactivate._change", function( e ) {
+ var elem = e.target;
+
+ if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "_change_attached" ) ) {
+ jQuery.event.add( elem, "change._change", function( event ) {
+ if ( this.parentNode && !event.isSimulated && !event.isTrigger ) {
+ jQuery.event.simulate( "change", this.parentNode, event, true );
+ }
+ });
+ jQuery._data( elem, "_change_attached", true );
+ }
+ });
+ },
+
+ handle: function( event ) {
+ var elem = event.target;
+
+ // Swallow native change events from checkbox/radio, we already triggered them above
+ if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) {
+ return event.handleObj.handler.apply( this, arguments );
+ }
+ },
+
+ teardown: function() {
+ jQuery.event.remove( this, "._change" );
+
+ return !rformElems.test( this.nodeName );
+ }
+ };
+}
+
+// Create "bubbling" focus and blur events
+if ( !jQuery.support.focusinBubbles ) {
+ jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) {
+
+ // Attach a single capturing handler while someone wants focusin/focusout
+ var attaches = 0,
+ handler = function( event ) {
+ jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true );
+ };
+
+ jQuery.event.special[ fix ] = {
+ setup: function() {
+ if ( attaches++ === 0 ) {
+ document.addEventListener( orig, handler, true );
+ }
+ },
+ teardown: function() {
+ if ( --attaches === 0 ) {
+ document.removeEventListener( orig, handler, true );
+ }
+ }
+ };
+ });
+}
+
+jQuery.fn.extend({
+
+ on: function( types, selector, data, fn, /*INTERNAL*/ one ) {
+ var origFn, type;
+
+ // Types can be a map of types/handlers
+ if ( typeof types === "object" ) {
+ // ( types-Object, selector, data )
+ if ( typeof selector !== "string" ) { // && selector != null
+ // ( types-Object, data )
+ data = data || selector;
+ selector = undefined;
+ }
+ for ( type in types ) {
+ this.on( type, selector, data, types[ type ], one );
+ }
+ return this;
+ }
+
+ if ( data == null && fn == null ) {
+ // ( types, fn )
+ fn = selector;
+ data = selector = undefined;
+ } else if ( fn == null ) {
+ if ( typeof selector === "string" ) {
+ // ( types, selector, fn )
+ fn = data;
+ data = undefined;
+ } else {
+ // ( types, data, fn )
+ fn = data;
+ data = selector;
+ selector = undefined;
+ }
+ }
+ if ( fn === false ) {
+ fn = returnFalse;
+ } else if ( !fn ) {
+ return this;
+ }
+
+ if ( one === 1 ) {
+ origFn = fn;
+ fn = function( event ) {
+ // Can use an empty set, since event contains the info
+ jQuery().off( event );
+ return origFn.apply( this, arguments );
+ };
+ // Use same guid so caller can remove using origFn
+ fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+ }
+ return this.each( function() {
+ jQuery.event.add( this, types, fn, data, selector );
+ });
+ },
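+	// Supported call shapes (illustrative; hypothetical selector/handler names):
+	//   .on( "click", fn )                 // directly bound
+	//   .on( "click", "a.nav", fn )        // delegated to matching descendants
+	//   .on( "click", { n: 1 }, fn )       // with event.data
+	//   .on({ click: fn1, keyup: fn2 })    // map of types to handlers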
+ one: function( types, selector, data, fn ) {
+ return this.on( types, selector, data, fn, 1 );
+ },
+ off: function( types, selector, fn ) {
+ var handleObj, type;
+ if ( types && types.preventDefault && types.handleObj ) {
+ // ( event ) dispatched jQuery.Event
+ handleObj = types.handleObj;
+ jQuery( types.delegateTarget ).off(
+ handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType,
+ handleObj.selector,
+ handleObj.handler
+ );
+ return this;
+ }
+ if ( typeof types === "object" ) {
+ // ( types-object [, selector] )
+ for ( type in types ) {
+ this.off( type, selector, types[ type ] );
+ }
+ return this;
+ }
+ if ( selector === false || typeof selector === "function" ) {
+ // ( types [, fn] )
+ fn = selector;
+ selector = undefined;
+ }
+ if ( fn === false ) {
+ fn = returnFalse;
+ }
+ return this.each(function() {
+ jQuery.event.remove( this, types, fn, selector );
+ });
+ },
+
+ bind: function( types, data, fn ) {
+ return this.on( types, null, data, fn );
+ },
+ unbind: function( types, fn ) {
+ return this.off( types, null, fn );
+ },
+
+ live: function( types, data, fn ) {
+ jQuery( this.context ).on( types, this.selector, data, fn );
+ return this;
+ },
+ die: function( types, fn ) {
+ jQuery( this.context ).off( types, this.selector || "**", fn );
+ return this;
+ },
+
+ delegate: function( selector, types, data, fn ) {
+ return this.on( types, selector, data, fn );
+ },
+ undelegate: function( selector, types, fn ) {
+ // ( namespace ) or ( selector, types [, fn] )
+ return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn );
+ },
+
+ trigger: function( type, data ) {
+ return this.each(function() {
+ jQuery.event.trigger( type, data, this );
+ });
+ },
+ triggerHandler: function( type, data ) {
+ if ( this[0] ) {
+ return jQuery.event.trigger( type, data, this[0], true );
+ }
+ },
+
+ toggle: function( fn ) {
+ // Save reference to arguments for access in closure
+ var args = arguments,
+ guid = fn.guid || jQuery.guid++,
+ i = 0,
+ toggler = function( event ) {
+ // Figure out which function to execute
+ var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i;
+ jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 );
+
+ // Make sure that clicks stop
+ event.preventDefault();
+
+ // and execute the function
+ return args[ lastToggle ].apply( this, arguments ) || false;
+ };
+
+ // link all the functions, so any of them can unbind this click handler
+ toggler.guid = guid;
+ while ( i < args.length ) {
+ args[ i++ ].guid = guid;
+ }
+
+ return this.click( toggler );
+ },
+
+ hover: function( fnOver, fnOut ) {
+ return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
+ }
+});
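+// Illustrative: jQuery("#btn").hover( enter, leave ) is shorthand for binding
+// mouseenter/mouseleave, and .toggle( f1, f2 ) cycles the given click handlers
+// (hypothetical element and handler names).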
+
+jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " +
+ "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
+ "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) {
+
+ // Handle event binding
+ jQuery.fn[ name ] = function( data, fn ) {
+ if ( fn == null ) {
+ fn = data;
+ data = null;
+ }
+
+ return arguments.length > 0 ?
+ this.on( name, null, data, fn ) :
+ this.trigger( name );
+ };
+
+ if ( rkeyEvent.test( name ) ) {
+ jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks;
+ }
+
+ if ( rmouseEvent.test( name ) ) {
+ jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks;
+ }
+});
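+// Each generated shorthand both binds and fires (illustrative):
+//   jQuery("#save").click( fn );  // same as .on( "click", null, null, fn )
+//   jQuery("#save").click();      // same as .trigger( "click" )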
+/*!
+ * Sizzle CSS Selector Engine
+ * Copyright 2012 jQuery Foundation and other contributors
+ * Released under the MIT license
+ * http://sizzlejs.com/
+ */
+(function( window, undefined ) {
+
+var cachedruns,
+ assertGetIdNotName,
+ Expr,
+ getText,
+ isXML,
+ contains,
+ compile,
+ sortOrder,
+ hasDuplicate,
+ outermostContext,
+
+ baseHasDuplicate = true,
+ strundefined = "undefined",
+
+ expando = ( "sizcache" + Math.random() ).replace( ".", "" ),
+
+ Token = String,
+ document = window.document,
+ docElem = document.documentElement,
+ dirruns = 0,
+ done = 0,
+ pop = [].pop,
+ push = [].push,
+ slice = [].slice,
+ // Use a stripped-down indexOf if a native one is unavailable
+ indexOf = [].indexOf || function( elem ) {
+ var i = 0,
+ len = this.length;
+ for ( ; i < len; i++ ) {
+ if ( this[i] === elem ) {
+ return i;
+ }
+ }
+ return -1;
+ },
+
+ // Augment a function for special use by Sizzle
+ markFunction = function( fn, value ) {
+ fn[ expando ] = value == null || value;
+ return fn;
+ },
+
+ createCache = function() {
+ var cache = {},
+ keys = [];
+
+ return markFunction(function( key, value ) {
+ // Only keep the most recent entries
+ if ( keys.push( key ) > Expr.cacheLength ) {
+ delete cache[ keys.shift() ];
+ }
+
+ // Retrieve with (key + " ") to avoid collision with native Object.prototype properties (see Issue #157)
+ return (cache[ key + " " ] = value);
+ }, cache );
+ },
+
+ classCache = createCache(),
+ tokenCache = createCache(),
+ compilerCache = createCache(),
+
+ // Regex
+
+ // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace
+ whitespace = "[\\x20\\t\\r\\n\\f]",
+ // http://www.w3.org/TR/css3-syntax/#characters
+ characterEncoding = "(?:\\\\.|[-\\w]|[^\\x00-\\xa0])+",
+
+ // Loosely modeled on CSS identifier characters
+ // An unquoted value should be a CSS identifier (http://www.w3.org/TR/css3-selectors/#attribute-selectors)
+ // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
+ identifier = characterEncoding.replace( "w", "w#" ),
+
+ // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors
+ operators = "([*^$|!~]?=)",
+ attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace +
+ "*(?:" + operators + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]",
+
+ // Prefer arguments not in parens/brackets,
+ // then attribute selectors and non-pseudos (denoted by :),
+ // then anything else
+ // These preferences are here to reduce the number of selectors
+ // needing tokenize in the PSEUDO preFilter
+ pseudos = ":(" + characterEncoding + ")(?:\\((?:(['\"])((?:\\\\.|[^\\\\])*?)\\2|([^()[\\]]*|(?:(?:" + attributes + ")|[^:]|\\\\.)*|.*))\\)|)",
+
+ // For matchExpr.POS and matchExpr.needsContext
+ pos = ":(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace +
+ "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)",
+
+ // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
+ rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ),
+
+ rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
+ rcombinators = new RegExp( "^" + whitespace + "*([\\x20\\t\\r\\n\\f>+~])" + whitespace + "*" ),
+ rpseudo = new RegExp( pseudos ),
+
+ // Easily-parseable/retrievable ID or TAG or CLASS selectors
+ rquickExpr = /^(?:#([\w\-]+)|(\w+)|\.([\w\-]+))$/,
+
+ rnot = /^:not/,
+ rsibling = /[\x20\t\r\n\f]*[+~]/,
+ rendsWithNot = /:not\($/,
+
+ rheader = /h\d/i,
+ rinputs = /input|select|textarea|button/i,
+
+ rbackslash = /\\(?!\\)/g,
+
+ matchExpr = {
+ "ID": new RegExp( "^#(" + characterEncoding + ")" ),
+ "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ),
+ "NAME": new RegExp( "^\\[name=['\"]?(" + characterEncoding + ")['\"]?\\]" ),
+ "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ),
+ "ATTR": new RegExp( "^" + attributes ),
+ "PSEUDO": new RegExp( "^" + pseudos ),
+ "POS": new RegExp( pos, "i" ),
+ "CHILD": new RegExp( "^:(only|nth|first|last)-child(?:\\(" + whitespace +
+ "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace +
+ "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
+ // For use in libraries implementing .is()
+ "needsContext": new RegExp( "^" + whitespace + "*[>+~]|" + pos, "i" )
+ },
+
+ // Support
+
+ // Used for testing something on an element
+ assert = function( fn ) {
+ var div = document.createElement("div");
+
+ try {
+ return fn( div );
+ } catch (e) {
+ return false;
+ } finally {
+ // release memory in IE
+ div = null;
+ }
+ },
+
+ // Check if getElementsByTagName("*") returns only elements
+ assertTagNameNoComments = assert(function( div ) {
+ div.appendChild( document.createComment("") );
+ return !div.getElementsByTagName("*").length;
+ }),
+
+ // Check if getAttribute returns normalized href attributes
+ assertHrefNotNormalized = assert(function( div ) {
+ div.innerHTML = "<a href='#'></a>";
+ return div.firstChild && typeof div.firstChild.getAttribute !== strundefined &&
+ div.firstChild.getAttribute("href") === "#";
+ }),
+
+ // Check if attributes should be retrieved by attribute nodes
+ assertAttributes = assert(function( div ) {
+ div.innerHTML = "<select></select>";
+ var type = typeof div.lastChild.getAttribute("multiple");
+ // IE8 returns a string for some attributes even when not present
+ return type !== "boolean" && type !== "string";
+ }),
+
+ // Check if getElementsByClassName can be trusted
+ assertUsableClassName = assert(function( div ) {
+ // Opera can't find a second classname (in 9.6)
+ div.innerHTML = "<div class='hidden e'></div><div class='hidden'></div>";
+ if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) {
+ return false;
+ }
+
+ // Safari 3.2 caches class attributes and doesn't catch changes
+ div.lastChild.className = "e";
+ return div.getElementsByClassName("e").length === 2;
+ }),
+
+ // Check if getElementById returns elements by name
+ // Check if getElementsByName privileges form controls or returns elements by ID
+ assertUsableName = assert(function( div ) {
+ // Inject content
+ div.id = expando + 0;
+ div.innerHTML = "<a name='" + expando + "'></a><div name='" + expando + "'></div>";
+ docElem.insertBefore( div, docElem.firstChild );
+
+ // Test
+ var pass = document.getElementsByName &&
+ // buggy browsers will return fewer than the correct 2
+ document.getElementsByName( expando ).length === 2 +
+ // buggy browsers will return more than the correct 0
+ document.getElementsByName( expando + 0 ).length;
+ assertGetIdNotName = !document.getElementById( expando );
+
+ // Cleanup
+ docElem.removeChild( div );
+
+ return pass;
+ });
+
+// If slice is not available, provide a backup
+try {
+ slice.call( docElem.childNodes, 0 )[0].nodeType;
+} catch ( e ) {
+ slice = function( i ) {
+ var elem,
+ results = [];
+ for ( ; (elem = this[i]); i++ ) {
+ results.push( elem );
+ }
+ return results;
+ };
+}
+
+function Sizzle( selector, context, results, seed ) {
+ results = results || [];
+ context = context || document;
+ var match, elem, xml, m,
+ nodeType = context.nodeType;
+
+ if ( !selector || typeof selector !== "string" ) {
+ return results;
+ }
+
+ if ( nodeType !== 1 && nodeType !== 9 ) {
+ return [];
+ }
+
+ xml = isXML( context );
+
+ if ( !xml && !seed ) {
+ if ( (match = rquickExpr.exec( selector )) ) {
+ // Speed-up: Sizzle("#ID")
+ if ( (m = match[1]) ) {
+ if ( nodeType === 9 ) {
+ elem = context.getElementById( m );
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ if ( elem && elem.parentNode ) {
+ // Handle the case where IE, Opera, and Webkit return items
+ // by name instead of ID
+ if ( elem.id === m ) {
+ results.push( elem );
+ return results;
+ }
+ } else {
+ return results;
+ }
+ } else {
+ // Context is not a document
+ if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) &&
+ contains( context, elem ) && elem.id === m ) {
+ results.push( elem );
+ return results;
+ }
+ }
+
+ // Speed-up: Sizzle("TAG")
+ } else if ( match[2] ) {
+ push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) );
+ return results;
+
+ // Speed-up: Sizzle(".CLASS")
+ } else if ( (m = match[3]) && assertUsableClassName && context.getElementsByClassName ) {
+ push.apply( results, slice.call(context.getElementsByClassName( m ), 0) );
+ return results;
+ }
+ }
+ }
+
+ // All others
+ return select( selector.replace( rtrim, "$1" ), context, results, seed, xml );
+}
+
+Sizzle.matches = function( expr, elements ) {
+ return Sizzle( expr, null, null, elements );
+};
+
+Sizzle.matchesSelector = function( elem, expr ) {
+ return Sizzle( expr, null, null, [ elem ] ).length > 0;
+};
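+// Illustrative: Sizzle( "div.item", document ) collects matching elements, while
+// Sizzle.matchesSelector( elem, ".item" ) tests a single element (hypothetical class).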
+
+// Returns a function to use in pseudos for input types
+function createInputPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && elem.type === type;
+ };
+}
+
+// Returns a function to use in pseudos for buttons
+function createButtonPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return (name === "input" || name === "button") && elem.type === type;
+ };
+}
+
+// Returns a function to use in pseudos for positionals
+function createPositionalPseudo( fn ) {
+ return markFunction(function( argument ) {
+ argument = +argument;
+ return markFunction(function( seed, matches ) {
+ var j,
+ matchIndexes = fn( [], seed.length, argument ),
+ i = matchIndexes.length;
+
+ // Match elements found at the specified indexes
+ while ( i-- ) {
+ if ( seed[ (j = matchIndexes[i]) ] ) {
+ seed[j] = !(matches[j] = seed[j]);
+ }
+ }
+ });
+ });
+}
+
+/**
+ * Utility function for retrieving the text value of an array of DOM nodes
+ * @param {Array|Element} elem
+ */
+getText = Sizzle.getText = function( elem ) {
+ var node,
+ ret = "",
+ i = 0,
+ nodeType = elem.nodeType;
+
+ if ( nodeType ) {
+ if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
+ // Use textContent for elements
+ // innerText usage removed for consistency of new lines (see #11153)
+ if ( typeof elem.textContent === "string" ) {
+ return elem.textContent;
+ } else {
+ // Traverse its children
+ for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
+ ret += getText( elem );
+ }
+ }
+ } else if ( nodeType === 3 || nodeType === 4 ) {
+ return elem.nodeValue;
+ }
+ // Do not include comment or processing instruction nodes
+ } else {
+
+ // If no nodeType, this is expected to be an array
+ for ( ; (node = elem[i]); i++ ) {
+ // Do not traverse comment nodes
+ ret += getText( node );
+ }
+ }
+ return ret;
+};
+
+isXML = Sizzle.isXML = function( elem ) {
+ // documentElement is verified for cases where it doesn't yet exist
+ // (such as loading iframes in IE - #4833)
+ var documentElement = elem && (elem.ownerDocument || elem).documentElement;
+ return documentElement ? documentElement.nodeName !== "HTML" : false;
+};
+
+// Element contains another
+contains = Sizzle.contains = docElem.contains ?
+ function( a, b ) {
+ var adown = a.nodeType === 9 ? a.documentElement : a,
+ bup = b && b.parentNode;
+ return a === bup || !!( bup && bup.nodeType === 1 && adown.contains && adown.contains(bup) );
+ } :
+ docElem.compareDocumentPosition ?
+ function( a, b ) {
+ return b && !!( a.compareDocumentPosition( b ) & 16 );
+ } :
+ function( a, b ) {
+ while ( (b = b.parentNode) ) {
+ if ( b === a ) {
+ return true;
+ }
+ }
+ return false;
+ };
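+// Illustrative: Sizzle.contains( document.documentElement, document.body ) is true;
+// an element is never considered to contain itself.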
+
+Sizzle.attr = function( elem, name ) {
+ var val,
+ xml = isXML( elem );
+
+ if ( !xml ) {
+ name = name.toLowerCase();
+ }
+ if ( (val = Expr.attrHandle[ name ]) ) {
+ return val( elem );
+ }
+ if ( xml || assertAttributes ) {
+ return elem.getAttribute( name );
+ }
+ val = elem.getAttributeNode( name );
+ return val ?
+ typeof elem[ name ] === "boolean" ?
+ elem[ name ] ? name : null :
+ val.specified ? val.value : null :
+ null;
+};
+
+Expr = Sizzle.selectors = {
+
+ // Can be adjusted by the user
+ cacheLength: 50,
+
+ createPseudo: markFunction,
+
+ match: matchExpr,
+
+ // IE6/7 return a modified href
+ attrHandle: assertHrefNotNormalized ?
+ {} :
+ {
+ "href": function( elem ) {
+ return elem.getAttribute( "href", 2 );
+ },
+ "type": function( elem ) {
+ return elem.getAttribute("type");
+ }
+ },
+
+ find: {
+ "ID": assertGetIdNotName ?
+ function( id, context, xml ) {
+ if ( typeof context.getElementById !== strundefined && !xml ) {
+ var m = context.getElementById( id );
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ return m && m.parentNode ? [m] : [];
+ }
+ } :
+ function( id, context, xml ) {
+ if ( typeof context.getElementById !== strundefined && !xml ) {
+ var m = context.getElementById( id );
+
+ return m ?
+ m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ?
+ [m] :
+ undefined :
+ [];
+ }
+ },
+
+ "TAG": assertTagNameNoComments ?
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== strundefined ) {
+ return context.getElementsByTagName( tag );
+ }
+ } :
+ function( tag, context ) {
+ var results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ var elem,
+ tmp = [],
+ i = 0;
+
+ for ( ; (elem = results[i]); i++ ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ },
+
+		"NAME": assertUsableName && function( name, context ) {
+			if ( typeof context.getElementsByName !== strundefined ) {
+				return context.getElementsByName( name );
+ }
+ },
+
+ "CLASS": assertUsableClassName && function( className, context, xml ) {
+ if ( typeof context.getElementsByClassName !== strundefined && !xml ) {
+ return context.getElementsByClassName( className );
+ }
+ }
+ },
+
+ relative: {
+ ">": { dir: "parentNode", first: true },
+ " ": { dir: "parentNode" },
+ "+": { dir: "previousSibling", first: true },
+ "~": { dir: "previousSibling" }
+ },
+
+ preFilter: {
+ "ATTR": function( match ) {
+ match[1] = match[1].replace( rbackslash, "" );
+
+ // Move the given value to match[3] whether quoted or unquoted
+ match[3] = ( match[4] || match[5] || "" ).replace( rbackslash, "" );
+
+ if ( match[2] === "~=" ) {
+ match[3] = " " + match[3] + " ";
+ }
+
+ return match.slice( 0, 4 );
+ },
+
+ "CHILD": function( match ) {
+ /* matches from matchExpr["CHILD"]
+ 1 type (only|nth|...)
+ 2 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
+ 3 xn-component of xn+y argument ([+-]?\d*n|)
+ 4 sign of xn-component
+ 5 x of xn-component
+ 6 sign of y-component
+ 7 y of y-component
+ */
+ match[1] = match[1].toLowerCase();
+
+ if ( match[1] === "nth" ) {
+ // nth-child requires argument
+ if ( !match[2] ) {
+ Sizzle.error( match[0] );
+ }
+
+ // numeric x and y parameters for Expr.filter.CHILD
+ // remember that false/true cast respectively to 0/1
+ match[3] = +( match[3] ? match[4] + (match[5] || 1) : 2 * ( match[2] === "even" || match[2] === "odd" ) );
+ match[4] = +( ( match[6] + match[7] ) || match[2] === "odd" );
+
+ // other types prohibit arguments
+ } else if ( match[2] ) {
+ Sizzle.error( match[0] );
+ }
+
+ return match;
+ },
+
+ "PSEUDO": function( match ) {
+ var unquoted, excess;
+ if ( matchExpr["CHILD"].test( match[0] ) ) {
+ return null;
+ }
+
+ if ( match[3] ) {
+ match[2] = match[3];
+ } else if ( (unquoted = match[4]) ) {
+ // Only check arguments that contain a pseudo
+ if ( rpseudo.test(unquoted) &&
+ // Get excess from tokenize (recursively)
+ (excess = tokenize( unquoted, true )) &&
+ // advance to the next closing parenthesis
+ (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) {
+
+ // excess is a negative index
+ unquoted = unquoted.slice( 0, excess );
+ match[0] = match[0].slice( 0, excess );
+ }
+ match[2] = unquoted;
+ }
+
+ // Return only captures needed by the pseudo filter method (type and argument)
+ return match.slice( 0, 3 );
+ }
+ },
+
+ filter: {
+ "ID": assertGetIdNotName ?
+ function( id ) {
+ id = id.replace( rbackslash, "" );
+ return function( elem ) {
+ return elem.getAttribute("id") === id;
+ };
+ } :
+ function( id ) {
+ id = id.replace( rbackslash, "" );
+ return function( elem ) {
+ var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id");
+ return node && node.value === id;
+ };
+ },
+
+ "TAG": function( nodeName ) {
+ if ( nodeName === "*" ) {
+ return function() { return true; };
+ }
+ nodeName = nodeName.replace( rbackslash, "" ).toLowerCase();
+
+ return function( elem ) {
+ return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
+ };
+ },
+
+ "CLASS": function( className ) {
+ var pattern = classCache[ expando ][ className + " " ];
+
+ return pattern ||
+ (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) &&
+ classCache( className, function( elem ) {
+ return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" );
+ });
+ },
+
+ "ATTR": function( name, operator, check ) {
+ return function( elem, context ) {
+ var result = Sizzle.attr( elem, name );
+
+ if ( result == null ) {
+ return operator === "!=";
+ }
+ if ( !operator ) {
+ return true;
+ }
+
+ result += "";
+
+ return operator === "=" ? result === check :
+ operator === "!=" ? result !== check :
+ operator === "^=" ? check && result.indexOf( check ) === 0 :
+ operator === "*=" ? check && result.indexOf( check ) > -1 :
+ operator === "$=" ? check && result.substr( result.length - check.length ) === check :
+ operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 :
+ operator === "|=" ? result === check || result.substr( 0, check.length + 1 ) === check + "-" :
+ false;
+ };
+ },
+
+ "CHILD": function( type, argument, first, last ) {
+
+ if ( type === "nth" ) {
+ return function( elem ) {
+ var node, diff,
+ parent = elem.parentNode;
+
+ if ( first === 1 && last === 0 ) {
+ return true;
+ }
+
+ if ( parent ) {
+ diff = 0;
+ for ( node = parent.firstChild; node; node = node.nextSibling ) {
+ if ( node.nodeType === 1 ) {
+ diff++;
+ if ( elem === node ) {
+ break;
+ }
+ }
+ }
+ }
+
+ // Incorporate the offset (or cast to NaN), then check against cycle size
+ diff -= last;
+ return diff === first || ( diff % first === 0 && diff / first >= 0 );
+ };
+ }
+
+ return function( elem ) {
+ var node = elem;
+
+ switch ( type ) {
+ case "only":
+ case "first":
+ while ( (node = node.previousSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+
+ if ( type === "first" ) {
+ return true;
+ }
+
+ node = elem;
+
+ /* falls through */
+ case "last":
+ while ( (node = node.nextSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ };
+ },
+
+ "PSEUDO": function( pseudo, argument ) {
+ // pseudo-class names are case-insensitive
+ // http://www.w3.org/TR/selectors/#pseudo-classes
+ // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
+ // Remember that setFilters inherits from pseudos
+ var args,
+ fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
+ Sizzle.error( "unsupported pseudo: " + pseudo );
+
+ // The user may use createPseudo to indicate that
+ // arguments are needed to create the filter function
+ // just as Sizzle does
+ if ( fn[ expando ] ) {
+ return fn( argument );
+ }
+
+ // But maintain support for old signatures
+ if ( fn.length > 1 ) {
+ args = [ pseudo, pseudo, "", argument ];
+ return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
+ markFunction(function( seed, matches ) {
+ var idx,
+ matched = fn( seed, argument ),
+ i = matched.length;
+ while ( i-- ) {
+ idx = indexOf.call( seed, matched[i] );
+ seed[ idx ] = !( matches[ idx ] = matched[i] );
+ }
+ }) :
+ function( elem ) {
+ return fn( elem, 0, args );
+ };
+ }
+
+ return fn;
+ }
+ },
+
+ pseudos: {
+ "not": markFunction(function( selector ) {
+ // Trim the selector passed to compile
+ // to avoid treating leading and trailing
+ // spaces as combinators
+ var input = [],
+ results = [],
+ matcher = compile( selector.replace( rtrim, "$1" ) );
+
+ return matcher[ expando ] ?
+ markFunction(function( seed, matches, context, xml ) {
+ var elem,
+ unmatched = matcher( seed, null, xml, [] ),
+ i = seed.length;
+
+ // Match elements unmatched by `matcher`
+ while ( i-- ) {
+ if ( (elem = unmatched[i]) ) {
+ seed[i] = !(matches[i] = elem);
+ }
+ }
+ }) :
+ function( elem, context, xml ) {
+ input[0] = elem;
+ matcher( input, null, xml, results );
+ return !results.pop();
+ };
+ }),
+
+ "has": markFunction(function( selector ) {
+ return function( elem ) {
+ return Sizzle( selector, elem ).length > 0;
+ };
+ }),
+
+ "contains": markFunction(function( text ) {
+ return function( elem ) {
+ return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1;
+ };
+ }),
+
+ "enabled": function( elem ) {
+ return elem.disabled === false;
+ },
+
+ "disabled": function( elem ) {
+ return elem.disabled === true;
+ },
+
+ "checked": function( elem ) {
+ // In CSS3, :checked should return both checked and selected elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ var nodeName = elem.nodeName.toLowerCase();
+ return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected);
+ },
+
+ "selected": function( elem ) {
+ // Accessing this property makes selected-by-default
+ // options in Safari work properly
+ if ( elem.parentNode ) {
+ elem.parentNode.selectedIndex;
+ }
+
+ return elem.selected === true;
+ },
+
+ "parent": function( elem ) {
+ return !Expr.pseudos["empty"]( elem );
+ },
+
+ "empty": function( elem ) {
+ // http://www.w3.org/TR/selectors/#empty-pseudo
+ // :empty is only affected by element nodes and content nodes (including
+ // text (3) and cdata (4)), not comments, processing instructions, or others
+ // Thanks to Diego Perini for the nodeName shortcut
+ // Greater than "@" means alpha characters (specifically not starting with "#" or "?")
+ var nodeType;
+ elem = elem.firstChild;
+ while ( elem ) {
+ if ( elem.nodeName > "@" || (nodeType = elem.nodeType) === 3 || nodeType === 4 ) {
+ return false;
+ }
+ elem = elem.nextSibling;
+ }
+ return true;
+ },
+
+ "header": function( elem ) {
+ return rheader.test( elem.nodeName );
+ },
+
+ "text": function( elem ) {
+ var type, attr;
+ // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc)
+ // use getAttribute instead to test this case
+ return elem.nodeName.toLowerCase() === "input" &&
+ (type = elem.type) === "text" &&
+ ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === type );
+ },
+
+ // Input types
+ "radio": createInputPseudo("radio"),
+ "checkbox": createInputPseudo("checkbox"),
+ "file": createInputPseudo("file"),
+ "password": createInputPseudo("password"),
+ "image": createInputPseudo("image"),
+
+ "submit": createButtonPseudo("submit"),
+ "reset": createButtonPseudo("reset"),
+
+ "button": function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && elem.type === "button" || name === "button";
+ },
+
+ "input": function( elem ) {
+ return rinputs.test( elem.nodeName );
+ },
+
+ "focus": function( elem ) {
+ var doc = elem.ownerDocument;
+ return elem === doc.activeElement && (!doc.hasFocus || doc.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex);
+ },
+
+ "active": function( elem ) {
+ return elem === elem.ownerDocument.activeElement;
+ },
+
+ // Positional types
+ "first": createPositionalPseudo(function() {
+ return [ 0 ];
+ }),
+
+ "last": createPositionalPseudo(function( matchIndexes, length ) {
+ return [ length - 1 ];
+ }),
+
+ "eq": createPositionalPseudo(function( matchIndexes, length, argument ) {
+ return [ argument < 0 ? argument + length : argument ];
+ }),
+
+ "even": createPositionalPseudo(function( matchIndexes, length ) {
+ for ( var i = 0; i < length; i += 2 ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ }),
+
+ "odd": createPositionalPseudo(function( matchIndexes, length ) {
+ for ( var i = 1; i < length; i += 2 ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ }),
+
+ "lt": createPositionalPseudo(function( matchIndexes, length, argument ) {
+ for ( var i = argument < 0 ? argument + length : argument; --i >= 0; ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ }),
+
+ "gt": createPositionalPseudo(function( matchIndexes, length, argument ) {
+ for ( var i = argument < 0 ? argument + length : argument; ++i < length; ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ })
+ }
+};
+
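+// Determines the relative order of two siblings: returns `ret` when the
+// nodes are identical, -1 when a precedes b, and 1 otherwise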
+function siblingCheck( a, b, ret ) {
+ if ( a === b ) {
+ return ret;
+ }
+
+ var cur = a.nextSibling;
+
+ while ( cur ) {
+ if ( cur === b ) {
+ return -1;
+ }
+
+ cur = cur.nextSibling;
+ }
+
+ return 1;
+}
+
+sortOrder = docElem.compareDocumentPosition ?
+ function( a, b ) {
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+ }
+
+ return ( !a.compareDocumentPosition || !b.compareDocumentPosition ?
+ a.compareDocumentPosition :
+ a.compareDocumentPosition(b) & 4
+ ) ? -1 : 1;
+ } :
+ function( a, b ) {
+ // The nodes are identical, we can exit early
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+
+ // Fall back to using sourceIndex (in IE) if it's available on both nodes
+ } else if ( a.sourceIndex && b.sourceIndex ) {
+ return a.sourceIndex - b.sourceIndex;
+ }
+
+ var al, bl,
+ ap = [],
+ bp = [],
+ aup = a.parentNode,
+ bup = b.parentNode,
+ cur = aup;
+
+ // If the nodes are siblings (or identical) we can do a quick check
+ if ( aup === bup ) {
+ return siblingCheck( a, b );
+
+ // If no parents were found then the nodes are disconnected
+ } else if ( !aup ) {
+ return -1;
+
+ } else if ( !bup ) {
+ return 1;
+ }
+
+ // Otherwise they're somewhere else in the tree so we need
+ // to build up a full list of the parentNodes for comparison
+ while ( cur ) {
+ ap.unshift( cur );
+ cur = cur.parentNode;
+ }
+
+ cur = bup;
+
+ while ( cur ) {
+ bp.unshift( cur );
+ cur = cur.parentNode;
+ }
+
+ al = ap.length;
+ bl = bp.length;
+
+ // Start walking down the tree looking for a discrepancy
+ for ( var i = 0; i < al && i < bl; i++ ) {
+ if ( ap[i] !== bp[i] ) {
+ return siblingCheck( ap[i], bp[i] );
+ }
+ }
+
+ // We ended someplace up the tree so do a sibling check
+ return i === al ?
+ siblingCheck( a, bp[i], -1 ) :
+ siblingCheck( ap[i], b, 1 );
+ };
+
+// Always assume the presence of duplicates if sort doesn't
+// pass them to our comparison function (as in Google Chrome).
+[0, 0].sort( sortOrder );
+baseHasDuplicate = !hasDuplicate;
+
+// Document sorting and removing duplicates
+Sizzle.uniqueSort = function( results ) {
+ var elem,
+ duplicates = [],
+ i = 1,
+ j = 0;
+
+ hasDuplicate = baseHasDuplicate;
+ results.sort( sortOrder );
+
+ if ( hasDuplicate ) {
+ for ( ; (elem = results[i]); i++ ) {
+ if ( elem === results[ i - 1 ] ) {
+ j = duplicates.push( i );
+ }
+ }
+ while ( j-- ) {
+ results.splice( duplicates[ j ], 1 );
+ }
+ }
+
+ return results;
+};
+
+Sizzle.error = function( msg ) {
+ throw new Error( "Syntax error, unrecognized expression: " + msg );
+};
+
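+// Splits a selector into groups of Tokens (one group per comma), applying
+// any preFilters; with parseOnly set, returns the length of the unparsable
+// excess instead of the token groups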
+function tokenize( selector, parseOnly ) {
+ var matched, match, tokens, type,
+ soFar, groups, preFilters,
+ cached = tokenCache[ expando ][ selector + " " ];
+
+ if ( cached ) {
+ return parseOnly ? 0 : cached.slice( 0 );
+ }
+
+ soFar = selector;
+ groups = [];
+ preFilters = Expr.preFilter;
+
+ while ( soFar ) {
+
+ // Comma and first run
+ if ( !matched || (match = rcomma.exec( soFar )) ) {
+ if ( match ) {
+ // Don't consume trailing commas as valid
+ soFar = soFar.slice( match[0].length ) || soFar;
+ }
+ groups.push( tokens = [] );
+ }
+
+ matched = false;
+
+ // Combinators
+ if ( (match = rcombinators.exec( soFar )) ) {
+ tokens.push( matched = new Token( match.shift() ) );
+ soFar = soFar.slice( matched.length );
+
+ // Cast descendant combinators to space
+ matched.type = match[0].replace( rtrim, " " );
+ }
+
+ // Filters
+ for ( type in Expr.filter ) {
+ if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] ||
+ (match = preFilters[ type ]( match ))) ) {
+
+ tokens.push( matched = new Token( match.shift() ) );
+ soFar = soFar.slice( matched.length );
+ matched.type = type;
+ matched.matches = match;
+ }
+ }
+
+ if ( !matched ) {
+ break;
+ }
+ }
+
+ // Return the length of the invalid excess
+ // if we're just parsing
+ // Otherwise, throw an error or return tokens
+ return parseOnly ?
+ soFar.length :
+ soFar ?
+ Sizzle.error( selector ) :
+ // Cache the tokens
+ tokenCache( selector, groups ).slice( 0 );
+}
+
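+// Wraps a matcher so it is applied while walking the combinator's axis
+// (parentNode or previousSibling); "first" combinators (>, +) check only
+// the nearest node, while the others keep walking and memoize results on
+// non-XML nodes via the expando dir cache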
+function addCombinator( matcher, combinator, base ) {
+ var dir = combinator.dir,
+ checkNonElements = base && combinator.dir === "parentNode",
+ doneName = done++;
+
+ return combinator.first ?
+ // Check against closest ancestor/preceding element
+ function( elem, context, xml ) {
+ while ( (elem = elem[ dir ]) ) {
+ if ( checkNonElements || elem.nodeType === 1 ) {
+ return matcher( elem, context, xml );
+ }
+ }
+ } :
+
+ // Check against all ancestor/preceding elements
+ function( elem, context, xml ) {
+ // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching
+ if ( !xml ) {
+ var cache,
+ dirkey = dirruns + " " + doneName + " ",
+ cachedkey = dirkey + cachedruns;
+ while ( (elem = elem[ dir ]) ) {
+ if ( checkNonElements || elem.nodeType === 1 ) {
+ if ( (cache = elem[ expando ]) === cachedkey ) {
+ return elem.sizset;
+ } else if ( typeof cache === "string" && cache.indexOf(dirkey) === 0 ) {
+ if ( elem.sizset ) {
+ return elem;
+ }
+ } else {
+ elem[ expando ] = cachedkey;
+ if ( matcher( elem, context, xml ) ) {
+ elem.sizset = true;
+ return elem;
+ }
+ elem.sizset = false;
+ }
+ }
+ }
+ } else {
+ while ( (elem = elem[ dir ]) ) {
+ if ( checkNonElements || elem.nodeType === 1 ) {
+ if ( matcher( elem, context, xml ) ) {
+ return elem;
+ }
+ }
+ }
+ }
+ };
+}
+
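+// Combines a list of matchers into one that passes only when every
+// individual matcher passes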
+function elementMatcher( matchers ) {
+ return matchers.length > 1 ?
+ function( elem, context, xml ) {
+ var i = matchers.length;
+ while ( i-- ) {
+ if ( !matchers[i]( elem, context, xml ) ) {
+ return false;
+ }
+ }
+ return true;
+ } :
+ matchers[0];
+}
+
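+// Returns the elements of `unmatched` accepted by `filter` (or all truthy
+// entries when no filter is given), recording original indices in `map`
+// so the condensed set can be re-expanded later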
+function condense( unmatched, map, filter, context, xml ) {
+ var elem,
+ newUnmatched = [],
+ i = 0,
+ len = unmatched.length,
+ mapped = map != null;
+
+ for ( ; i < len; i++ ) {
+ if ( (elem = unmatched[i]) ) {
+ if ( !filter || filter( elem, context, xml ) ) {
+ newUnmatched.push( elem );
+ if ( mapped ) {
+ map.push( i );
+ }
+ }
+ }
+ }
+
+ return newUnmatched;
+}
+
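+// Builds a matcher for positional (set) expressions: candidates are
+// optionally prefiltered, matched as a whole set, then post-filtered and
+// post-found while keeping the seed and results arrays in sync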
+function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
+ if ( postFilter && !postFilter[ expando ] ) {
+ postFilter = setMatcher( postFilter );
+ }
+ if ( postFinder && !postFinder[ expando ] ) {
+ postFinder = setMatcher( postFinder, postSelector );
+ }
+ return markFunction(function( seed, results, context, xml ) {
+ var temp, i, elem,
+ preMap = [],
+ postMap = [],
+ preexisting = results.length,
+
+ // Get initial elements from seed or context
+ elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ),
+
+ // Prefilter to get matcher input, preserving a map for seed-results synchronization
+ matcherIn = preFilter && ( seed || !selector ) ?
+ condense( elems, preMap, preFilter, context, xml ) :
+ elems,
+
+ matcherOut = matcher ?
+ // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
+ postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
+
+ // ...intermediate processing is necessary
+ [] :
+
+ // ...otherwise use results directly
+ results :
+ matcherIn;
+
+ // Find primary matches
+ if ( matcher ) {
+ matcher( matcherIn, matcherOut, context, xml );
+ }
+
+ // Apply postFilter
+ if ( postFilter ) {
+ temp = condense( matcherOut, postMap );
+ postFilter( temp, [], context, xml );
+
+ // Un-match failing elements by moving them back to matcherIn
+ i = temp.length;
+ while ( i-- ) {
+ if ( (elem = temp[i]) ) {
+ matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem);
+ }
+ }
+ }
+
+ if ( seed ) {
+ if ( postFinder || preFilter ) {
+ if ( postFinder ) {
+ // Get the final matcherOut by condensing this intermediate into postFinder contexts
+ temp = [];
+ i = matcherOut.length;
+ while ( i-- ) {
+ if ( (elem = matcherOut[i]) ) {
+ // Restore matcherIn since elem is not yet a final match
+ temp.push( (matcherIn[i] = elem) );
+ }
+ }
+ postFinder( null, (matcherOut = []), temp, xml );
+ }
+
+ // Move matched elements from seed to results to keep them synchronized
+ i = matcherOut.length;
+ while ( i-- ) {
+ if ( (elem = matcherOut[i]) &&
+ (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) {
+
+ seed[temp] = !(results[temp] = elem);
+ }
+ }
+ }
+
+ // Add elements to results, through postFinder if defined
+ } else {
+ matcherOut = condense(
+ matcherOut === results ?
+ matcherOut.splice( preexisting, matcherOut.length ) :
+ matcherOut
+ );
+ if ( postFinder ) {
+ postFinder( null, results, matcherOut, xml );
+ } else {
+ push.apply( results, matcherOut );
+ }
+ }
+ });
+}
+
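+// Builds a single matcher from one tokenized selector group, handing off
+// to setMatcher as soon as a positional (set) matcher is encountered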
+function matcherFromTokens( tokens ) {
+ var checkContext, matcher, j,
+ len = tokens.length,
+ leadingRelative = Expr.relative[ tokens[0].type ],
+ implicitRelative = leadingRelative || Expr.relative[" "],
+ i = leadingRelative ? 1 : 0,
+
+ // The foundational matcher ensures that elements are reachable from top-level context(s)
+ matchContext = addCombinator( function( elem ) {
+ return elem === checkContext;
+ }, implicitRelative, true ),
+ matchAnyContext = addCombinator( function( elem ) {
+ return indexOf.call( checkContext, elem ) > -1;
+ }, implicitRelative, true ),
+ matchers = [ function( elem, context, xml ) {
+ return ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
+ (checkContext = context).nodeType ?
+ matchContext( elem, context, xml ) :
+ matchAnyContext( elem, context, xml ) );
+ } ];
+
+ for ( ; i < len; i++ ) {
+ if ( (matcher = Expr.relative[ tokens[i].type ]) ) {
+ matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
+ } else {
+ matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches );
+
+ // Return special upon seeing a positional matcher
+ if ( matcher[ expando ] ) {
+ // Find the next relative operator (if any) for proper handling
+ j = ++i;
+ for ( ; j < len; j++ ) {
+ if ( Expr.relative[ tokens[j].type ] ) {
+ break;
+ }
+ }
+ return setMatcher(
+ i > 1 && elementMatcher( matchers ),
+ i > 1 && tokens.slice( 0, i - 1 ).join("").replace( rtrim, "$1" ),
+ matcher,
+ i < j && matcherFromTokens( tokens.slice( i, j ) ),
+ j < len && matcherFromTokens( (tokens = tokens.slice( j )) ),
+ j < len && tokens.join("")
+ );
+ }
+ matchers.push( matcher );
+ }
+ }
+
+ return elementMatcher( matchers );
+}
+
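+// Folds all element matchers and set matchers into one superMatcher that
+// pushes element-level matches straight into results and routes the
+// remaining elements through the set filters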
+function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
+ var bySet = setMatchers.length > 0,
+ byElement = elementMatchers.length > 0,
+ superMatcher = function( seed, context, xml, results, expandContext ) {
+ var elem, j, matcher,
+ setMatched = [],
+ matchedCount = 0,
+ i = "0",
+ unmatched = seed && [],
+ outermost = expandContext != null,
+ contextBackup = outermostContext,
+ // We must always have either seed elements or context
+ elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ),
+ // Nested matchers should use non-integer dirruns
+ dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.E);
+
+ if ( outermost ) {
+ outermostContext = context !== document && context;
+ cachedruns = superMatcher.el;
+ }
+
+ // Add elements passing elementMatchers directly to results
+ for ( ; (elem = elems[i]) != null; i++ ) {
+ if ( byElement && elem ) {
+ for ( j = 0; (matcher = elementMatchers[j]); j++ ) {
+ if ( matcher( elem, context, xml ) ) {
+ results.push( elem );
+ break;
+ }
+ }
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ cachedruns = ++superMatcher.el;
+ }
+ }
+
+ // Track unmatched elements for set filters
+ if ( bySet ) {
+ // They will have gone through all possible matchers
+ if ( (elem = !matcher && elem) ) {
+ matchedCount--;
+ }
+
+ // Lengthen the array for every element, matched or not
+ if ( seed ) {
+ unmatched.push( elem );
+ }
+ }
+ }
+
+ // Apply set filters to unmatched elements
+ matchedCount += i;
+ if ( bySet && i !== matchedCount ) {
+ for ( j = 0; (matcher = setMatchers[j]); j++ ) {
+ matcher( unmatched, setMatched, context, xml );
+ }
+
+ if ( seed ) {
+ // Reintegrate element matches to eliminate the need for sorting
+ if ( matchedCount > 0 ) {
+ while ( i-- ) {
+ if ( !(unmatched[i] || setMatched[i]) ) {
+ setMatched[i] = pop.call( results );
+ }
+ }
+ }
+
+ // Discard index placeholder values to get only actual matches
+ setMatched = condense( setMatched );
+ }
+
+ // Add matches to results
+ push.apply( results, setMatched );
+
+ // Without a seed, set matches landing after other successful matchers
+ // can leave results duplicated or out of order, so unique-sort them
+ if ( outermost && !seed && setMatched.length > 0 &&
+ ( matchedCount + setMatchers.length ) > 1 ) {
+
+ Sizzle.uniqueSort( results );
+ }
+ }
+
+ // Override manipulation of globals by nested matchers
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ outermostContext = contextBackup;
+ }
+
+ return unmatched;
+ };
+
+ superMatcher.el = 0;
+ return bySet ?
+ markFunction( superMatcher ) :
+ superMatcher;
+}
+
+compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) {
+ var i,
+ setMatchers = [],
+ elementMatchers = [],
+ cached = compilerCache[ expando ][ selector + " " ];
+
+ if ( !cached ) {
+ // Generate a function of recursive functions that can be used to check each element
+ if ( !group ) {
+ group = tokenize( selector );
+ }
+ i = group.length;
+ while ( i-- ) {
+ cached = matcherFromTokens( group[i] );
+ if ( cached[ expando ] ) {
+ setMatchers.push( cached );
+ } else {
+ elementMatchers.push( cached );
+ }
+ }
+
+ // Cache the compiled function
+ cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) );
+ }
+ return cached;
+};
+
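+// Runs the selector against each context in turn, accumulating every
+// match into the shared results array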
+function multipleContexts( selector, contexts, results ) {
+ var i = 0,
+ len = contexts.length;
+ for ( ; i < len; i++ ) {
+ Sizzle( selector, contexts[i], results );
+ }
+ return results;
+}
+
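+// Main selection entry point: tokenizes the selector, shortcuts ID-rooted
+// queries, seeds right-to-left matching from the last non-combinator
+// token, then compiles and runs the matcher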
+function select( selector, context, results, seed, xml ) {
+ var i, tokens, token, type, find,
+ match = tokenize( selector ),
+ j = match.length;
+
+ if ( !seed ) {
+ // Try to minimize operations if there is only one group
+ if ( match.length === 1 ) {
+
+ // Take a shortcut and set the context if the root selector is an ID
+ tokens = match[0] = match[0].slice( 0 );
+ if ( tokens.length > 2 && (token = tokens[0]).type === "ID" &&
+ context.nodeType === 9 && !xml &&
+ Expr.relative[ tokens[1].type ] ) {
+
+ context = Expr.find["ID"]( token.matches[0].replace( rbackslash, "" ), context, xml )[0];
+ if ( !context ) {
+ return results;
+ }
+
+ selector = selector.slice( tokens.shift().length );
+ }
+
+ // Fetch a seed set for right-to-left matching
+ for ( i = matchExpr["POS"].test( selector ) ? -1 : tokens.length - 1; i >= 0; i-- ) {
+ token = tokens[i];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ (type = token.type) ] ) {
+ break;
+ }
+ if ( (find = Expr.find[ type ]) ) {
+ // Search, expanding context for leading sibling combinators
+ if ( (seed = find(
+ token.matches[0].replace( rbackslash, "" ),
+ rsibling.test( tokens[0].type ) && context.parentNode || context,
+ xml
+ )) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && tokens.join("");
+ if ( !selector ) {
+ push.apply( results, slice.call( seed, 0 ) );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function
+ // Provide `match` to avoid retokenization if we modified the selector above
+ compile( selector, match )(
+ seed,
+ context,
+ xml,
+ results,
+ rsibling.test( selector )
+ );
+ return results;
+}
+
+if ( document.querySelectorAll ) {
+ (function() {
+ var disconnectedMatch,
+ oldSelect = select,
+ rescape = /'|\\/g,
+ rattributeQuotes = /\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g,
+
+ // qSA(:focus) reports false when true (Chrome 21), no need to also add to buggyMatches since matches checks buggyQSA
+ // A support test would require too much code (would include document ready)
+ rbuggyQSA = [ ":focus" ],
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ // A support test would require too much code (would include document ready)
+ // just skip matchesSelector for :active
+ rbuggyMatches = [ ":active" ],
+ matches = docElem.matchesSelector ||
+ docElem.mozMatchesSelector ||
+ docElem.webkitMatchesSelector ||
+ docElem.oMatchesSelector ||
+ docElem.msMatchesSelector;
+
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert(function( div ) {
+ // The selected attribute is set to the empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // http://bugs.jquery.com/ticket/12359
+ div.innerHTML = "<select><option selected=''></option></select>";
+
+ // IE8 - Some boolean attributes are not treated correctly
+ if ( !div.querySelectorAll("[selected]").length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" );
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here (do not put tests after this one)
+ if ( !div.querySelectorAll(":checked").length ) {
+ rbuggyQSA.push(":checked");
+ }
+ });
+
+ assert(function( div ) {
+
+ // Opera 10-12/IE9 - ^= $= *= and empty values
+ // Should not select anything
+ div.innerHTML = "<p test=''></p>";
+ if ( div.querySelectorAll("[test^='']").length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:\"\"|'')" );
+ }
+
+ // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+ // IE8 throws error here (do not put tests after this one)
+ div.innerHTML = "<input type='hidden'/>";
+ if ( !div.querySelectorAll(":enabled").length ) {
+ rbuggyQSA.push(":enabled", ":disabled");
+ }
+ });
+
+ // rbuggyQSA always contains :focus, so no need for a length check
+ rbuggyQSA = /* rbuggyQSA.length && */ new RegExp( rbuggyQSA.join("|") );
+
+ select = function( selector, context, results, seed, xml ) {
+ // Only use querySelectorAll when not filtering,
+ // when this is not xml,
+ // and when no QSA bugs apply
+ if ( !seed && !xml && !rbuggyQSA.test( selector ) ) {
+ var groups, i,
+ old = true,
+ nid = expando,
+ newContext = context,
+ newSelector = context.nodeType === 9 && selector;
+
+ // qSA works strangely on Element-rooted queries
+ // We can work around this by specifying an extra ID on the root
+ // and working up from there (Thanks to Andrew Dupont for the technique)
+ // IE 8 doesn't work on object elements
+ if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) {
+ groups = tokenize( selector );
+
+ if ( (old = context.getAttribute("id")) ) {
+ nid = old.replace( rescape, "\\$&" );
+ } else {
+ context.setAttribute( "id", nid );
+ }
+ nid = "[id='" + nid + "'] ";
+
+ i = groups.length;
+ while ( i-- ) {
+ groups[i] = nid + groups[i].join("");
+ }
+ newContext = rsibling.test( selector ) && context.parentNode || context;
+ newSelector = groups.join(",");
+ }
+
+ if ( newSelector ) {
+ try {
+ push.apply( results, slice.call( newContext.querySelectorAll(
+ newSelector
+ ), 0 ) );
+ return results;
+ } catch(qsaError) {
+ } finally {
+ if ( !old ) {
+ context.removeAttribute("id");
+ }
+ }
+ }
+ }
+
+ return oldSelect( selector, context, results, seed, xml );
+ };
+
+ if ( matches ) {
+ assert(function( div ) {
+ // Check to see if it's possible to do matchesSelector
+ // on a disconnected node (IE 9)
+ disconnectedMatch = matches.call( div, "div" );
+
+ // This should fail with an exception
+ // Gecko does not error, returns false instead
+ try {
+ matches.call( div, "[test!='']:sizzle" );
+ rbuggyMatches.push( "!=", pseudos );
+ } catch ( e ) {}
+ });
+
+ // rbuggyMatches always contains :active and :focus, so no need for a length check
+ rbuggyMatches = /* rbuggyMatches.length && */ new RegExp( rbuggyMatches.join("|") );
+
+ Sizzle.matchesSelector = function( elem, expr ) {
+ // Make sure that attribute selectors are quoted
+ expr = expr.replace( rattributeQuotes, "='$1']" );
+
+ // rbuggyMatches always contains :active, so no need for an existence check
+ if ( !isXML( elem ) && !rbuggyMatches.test( expr ) && !rbuggyQSA.test( expr ) ) {
+ try {
+ var ret = matches.call( elem, expr );
+
+ // IE 9's matchesSelector returns false on disconnected nodes
+ if ( ret || disconnectedMatch ||
+ // As well, disconnected nodes are said to be in a document
+ // fragment in IE 9
+ elem.document && elem.document.nodeType !== 11 ) {
+ return ret;
+ }
+ } catch(e) {}
+ }
+
+ return Sizzle( expr, null, null, [ elem ] ).length > 0;
+ };
+ }
+ })();
+}
+
+// Deprecated
+Expr.pseudos["nth"] = Expr.pseudos["eq"];
+
+// Back-compat
+function setFilters() {}
+Expr.filters = setFilters.prototype = Expr.pseudos;
+Expr.setFilters = new setFilters();
+
+// Override sizzle attribute retrieval
+Sizzle.attr = jQuery.attr;
+jQuery.find = Sizzle;
+jQuery.expr = Sizzle.selectors;
+jQuery.expr[":"] = jQuery.expr.pseudos;
+jQuery.unique = Sizzle.uniqueSort;
+jQuery.text = Sizzle.getText;
+jQuery.isXMLDoc = Sizzle.isXML;
+jQuery.contains = Sizzle.contains;
+
+
+})( window );
+var runtil = /Until$/,
+ rparentsprev = /^(?:parents|prev(?:Until|All))/,
+ isSimple = /^.[^:#\[\.,]*$/,
+ rneedsContext = jQuery.expr.match.needsContext,
+ // methods guaranteed to produce a unique set when starting from a unique set
+ guaranteedUnique = {
+ children: true,
+ contents: true,
+ next: true,
+ prev: true
+ };
+
+jQuery.fn.extend({
+ find: function( selector ) {
+ var i, l, length, n, r, ret,
+ self = this;
+
+ if ( typeof selector !== "string" ) {
+ return jQuery( selector ).filter(function() {
+ for ( i = 0, l = self.length; i < l; i++ ) {
+ if ( jQuery.contains( self[ i ], this ) ) {
+ return true;
+ }
+ }
+ });
+ }
+
+ ret = this.pushStack( "", "find", selector );
+
+ for ( i = 0, l = this.length; i < l; i++ ) {
+ length = ret.length;
+ jQuery.find( selector, this[i], ret );
+
+ if ( i > 0 ) {
+ // Make sure that the results are unique
+ for ( n = length; n < ret.length; n++ ) {
+ for ( r = 0; r < length; r++ ) {
+ if ( ret[r] === ret[n] ) {
+ ret.splice(n--, 1);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ has: function( target ) {
+ var i,
+ targets = jQuery( target, this ),
+ len = targets.length;
+
+ return this.filter(function() {
+ for ( i = 0; i < len; i++ ) {
+ if ( jQuery.contains( this, targets[i] ) ) {
+ return true;
+ }
+ }
+ });
+ },
+
+ not: function( selector ) {
+ return this.pushStack( winnow(this, selector, false), "not", selector);
+ },
+
+ filter: function( selector ) {
+ return this.pushStack( winnow(this, selector, true), "filter", selector );
+ },
+
+ is: function( selector ) {
+ return !!selector && (
+ typeof selector === "string" ?
+ // If this is a positional/relative selector, check membership in the returned set
+ // so $("p:first").is("p:last") won't return true for a doc with two "p".
+ rneedsContext.test( selector ) ?
+ jQuery( selector, this.context ).index( this[0] ) >= 0 :
+ jQuery.filter( selector, this ).length > 0 :
+ this.filter( selector ).length > 0 );
+ },
+
+ closest: function( selectors, context ) {
+ var cur,
+ i = 0,
+ l = this.length,
+ ret = [],
+ pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ?
+ jQuery( selectors, context || this.context ) :
+ 0;
+
+ for ( ; i < l; i++ ) {
+ cur = this[i];
+
+ while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) {
+ if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) {
+ ret.push( cur );
+ break;
+ }
+ cur = cur.parentNode;
+ }
+ }
+
+ ret = ret.length > 1 ? jQuery.unique( ret ) : ret;
+
+ return this.pushStack( ret, "closest", selectors );
+ },
+
+ // Determine the position of an element within
+ // the matched set of elements
+ index: function( elem ) {
+
+ // No argument, return index in parent
+ if ( !elem ) {
+ return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1;
+ }
+
+ // index in selector
+ if ( typeof elem === "string" ) {
+ return jQuery.inArray( this[0], jQuery( elem ) );
+ }
+
+ // Locate the position of the desired element
+ return jQuery.inArray(
+ // If it receives a jQuery object, the first element is used
+ elem.jquery ? elem[0] : elem, this );
+ },
+
+ add: function( selector, context ) {
+ var set = typeof selector === "string" ?
+ jQuery( selector, context ) :
+ jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ),
+ all = jQuery.merge( this.get(), set );
+
+ return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ?
+ all :
+ jQuery.unique( all ) );
+ },
+
+ addBack: function( selector ) {
+ return this.add( selector == null ?
+ this.prevObject : this.prevObject.filter(selector)
+ );
+ }
+});
+
+jQuery.fn.andSelf = jQuery.fn.addBack;
+
+// A painfully simple check to see if an element is disconnected
+// from a document (should be improved, where feasible).
+function isDisconnected( node ) {
+ return !node || !node.parentNode || node.parentNode.nodeType === 11;
+}
+
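+// Walks the given direction from cur and returns the nearest element node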
+function sibling( cur, dir ) {
+ do {
+ cur = cur[ dir ];
+ } while ( cur && cur.nodeType !== 1 );
+
+ return cur;
+}
+
+jQuery.each({
+ parent: function( elem ) {
+ var parent = elem.parentNode;
+ return parent && parent.nodeType !== 11 ? parent : null;
+ },
+ parents: function( elem ) {
+ return jQuery.dir( elem, "parentNode" );
+ },
+ parentsUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "parentNode", until );
+ },
+ next: function( elem ) {
+ return sibling( elem, "nextSibling" );
+ },
+ prev: function( elem ) {
+ return sibling( elem, "previousSibling" );
+ },
+ nextAll: function( elem ) {
+ return jQuery.dir( elem, "nextSibling" );
+ },
+ prevAll: function( elem ) {
+ return jQuery.dir( elem, "previousSibling" );
+ },
+ nextUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "nextSibling", until );
+ },
+ prevUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "previousSibling", until );
+ },
+ siblings: function( elem ) {
+ return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem );
+ },
+ children: function( elem ) {
+ return jQuery.sibling( elem.firstChild );
+ },
+ contents: function( elem ) {
+ return jQuery.nodeName( elem, "iframe" ) ?
+ elem.contentDocument || elem.contentWindow.document :
+ jQuery.merge( [], elem.childNodes );
+ }
+}, function( name, fn ) {
+ jQuery.fn[ name ] = function( until, selector ) {
+ var ret = jQuery.map( this, fn, until );
+
+ if ( !runtil.test( name ) ) {
+ selector = until;
+ }
+
+ if ( selector && typeof selector === "string" ) {
+ ret = jQuery.filter( selector, ret );
+ }
+
+ ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret;
+
+ if ( this.length > 1 && rparentsprev.test( name ) ) {
+ ret = ret.reverse();
+ }
+
+ return this.pushStack( ret, name, core_slice.call( arguments ).join(",") );
+ };
+});
+
+jQuery.extend({
+ filter: function( expr, elems, not ) {
+ if ( not ) {
+ expr = ":not(" + expr + ")";
+ }
+
+ return elems.length === 1 ?
+ jQuery.find.matchesSelector(elems[0], expr) ? [ elems[0] ] : [] :
+ jQuery.find.matches(expr, elems);
+ },
+
+ dir: function( elem, dir, until ) {
+ var matched = [],
+ cur = elem[ dir ];
+
+ while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
+ if ( cur.nodeType === 1 ) {
+ matched.push( cur );
+ }
+ cur = cur[dir];
+ }
+ return matched;
+ },
+
+ sibling: function( n, elem ) {
+ var r = [];
+
+ for ( ; n; n = n.nextSibling ) {
+ if ( n.nodeType === 1 && n !== elem ) {
+ r.push( n );
+ }
+ }
+
+ return r;
+ }
+});
+
+// Implement the identical functionality for filter and not
+function winnow( elements, qualifier, keep ) {
+
+ // Can't pass null or undefined to indexOf in Firefox 4
+ // Set to 0 to skip string check
+ qualifier = qualifier || 0;
+
+ if ( jQuery.isFunction( qualifier ) ) {
+ return jQuery.grep(elements, function( elem, i ) {
+ var retVal = !!qualifier.call( elem, i, elem );
+ return retVal === keep;
+ });
+
+ } else if ( qualifier.nodeType ) {
+ return jQuery.grep(elements, function( elem, i ) {
+ return ( elem === qualifier ) === keep;
+ });
+
+ } else if ( typeof qualifier === "string" ) {
+ var filtered = jQuery.grep(elements, function( elem ) {
+ return elem.nodeType === 1;
+ });
+
+ if ( isSimple.test( qualifier ) ) {
+ return jQuery.filter(qualifier, filtered, !keep);
+ } else {
+ qualifier = jQuery.filter( qualifier, filtered );
+ }
+ }
+
+ return jQuery.grep(elements, function( elem, i ) {
+ return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep;
+ });
+}
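+
+// Pre-creates the HTML5 element names inside a fragment so that legacy IE
+// (6-8) can parse them from innerHTML (the html5shiv technique)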
+function createSafeFragment( document ) {
+ var list = nodeNames.split( "|" ),
+ safeFrag = document.createDocumentFragment();
+
+ if ( safeFrag.createElement ) {
+ while ( list.length ) {
+ safeFrag.createElement(
+ list.pop()
+ );
+ }
+ }
+ return safeFrag;
+}
+
+var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" +
+ "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
+ rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g,
+ rleadingWhitespace = /^\s+/,
+ rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,
+ rtagName = /<([\w:]+)/,
+ rtbody = /<tbody/i,
+ rhtml = /<|&#?\w+;/,
+ rnoInnerhtml = /<(?:script|style|link)/i,
+ rnocache = /<(?:script|object|embed|option|style)/i,
+ rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"),
+ rcheckableType = /^(?:checkbox|radio)$/,
+ // checked="checked" or checked
+ rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+ rscriptType = /\/(java|ecma)script/i,
+ rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)|[\]\-]{2}>\s*$/g,
+ wrapMap = {
+ option: [ 1, "<select multiple='multiple'>", "</select>" ],
+ legend: [ 1, "<fieldset>", "</fieldset>" ],
+ thead: [ 1, "<table>", "</table>" ],
+ tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+ td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+ col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
+ area: [ 1, "<map>", "</map>" ],
+ _default: [ 0, "", "" ]
+ },
+ safeFragment = createSafeFragment( document ),
+ fragmentDiv = safeFragment.appendChild( document.createElement("div") );
+
+wrapMap.optgroup = wrapMap.option;
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags,
+// unless wrapped in a div with non-breaking characters in front of it.
+if ( !jQuery.support.htmlSerialize ) {
+ wrapMap._default = [ 1, "X<div>", "</div>" ];
+}
+
+jQuery.fn.extend({
+ text: function( value ) {
+ return jQuery.access( this, function( value ) {
+ return value === undefined ?
+ jQuery.text( this ) :
+ this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) );
+ }, null, value, arguments.length );
+ },
+
+ wrapAll: function( html ) {
+ if ( jQuery.isFunction( html ) ) {
+ return this.each(function(i) {
+ jQuery(this).wrapAll( html.call(this, i) );
+ });
+ }
+
+ if ( this[0] ) {
+ // The elements to wrap the target around
+ var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true);
+
+ if ( this[0].parentNode ) {
+ wrap.insertBefore( this[0] );
+ }
+
+ wrap.map(function() {
+ var elem = this;
+
+ while ( elem.firstChild && elem.firstChild.nodeType === 1 ) {
+ elem = elem.firstChild;
+ }
+
+ return elem;
+ }).append( this );
+ }
+
+ return this;
+ },
+
+ wrapInner: function( html ) {
+ if ( jQuery.isFunction( html ) ) {
+ return this.each(function(i) {
+ jQuery(this).wrapInner( html.call(this, i) );
+ });
+ }
+
+ return this.each(function() {
+ var self = jQuery( this ),
+ contents = self.contents();
+
+ if ( contents.length ) {
+ contents.wrapAll( html );
+
+ } else {
+ self.append( html );
+ }
+ });
+ },
+
+ wrap: function( html ) {
+ var isFunction = jQuery.isFunction( html );
+
+ return this.each(function(i) {
+ jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html );
+ });
+ },
+
+ unwrap: function() {
+ return this.parent().each(function() {
+ if ( !jQuery.nodeName( this, "body" ) ) {
+ jQuery( this ).replaceWith( this.childNodes );
+ }
+ }).end();
+ },
+
+ append: function() {
+ return this.domManip(arguments, true, function( elem ) {
+ if ( this.nodeType === 1 || this.nodeType === 11 ) {
+ this.appendChild( elem );
+ }
+ });
+ },
+
+ prepend: function() {
+ return this.domManip(arguments, true, function( elem ) {
+ if ( this.nodeType === 1 || this.nodeType === 11 ) {
+ this.insertBefore( elem, this.firstChild );
+ }
+ });
+ },
+
+ before: function() {
+ if ( !isDisconnected( this[0] ) ) {
+ return this.domManip(arguments, false, function( elem ) {
+ this.parentNode.insertBefore( elem, this );
+ });
+ }
+
+ if ( arguments.length ) {
+ var set = jQuery.clean( arguments );
+ return this.pushStack( jQuery.merge( set, this ), "before", this.selector );
+ }
+ },
+
+ after: function() {
+ if ( !isDisconnected( this[0] ) ) {
+ return this.domManip(arguments, false, function( elem ) {
+ this.parentNode.insertBefore( elem, this.nextSibling );
+ });
+ }
+
+ if ( arguments.length ) {
+ var set = jQuery.clean( arguments );
+ return this.pushStack( jQuery.merge( this, set ), "after", this.selector );
+ }
+ },
+
+ // keepData is for internal use only--do not document
+ remove: function( selector, keepData ) {
+ var elem,
+ i = 0;
+
+ for ( ; (elem = this[i]) != null; i++ ) {
+ if ( !selector || jQuery.filter( selector, [ elem ] ).length ) {
+ if ( !keepData && elem.nodeType === 1 ) {
+ jQuery.cleanData( elem.getElementsByTagName("*") );
+ jQuery.cleanData( [ elem ] );
+ }
+
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+ }
+ }
+
+ return this;
+ },
+
+ empty: function() {
+ var elem,
+ i = 0;
+
+ for ( ; (elem = this[i]) != null; i++ ) {
+ // Remove element nodes and prevent memory leaks
+ if ( elem.nodeType === 1 ) {
+ jQuery.cleanData( elem.getElementsByTagName("*") );
+ }
+
+ // Remove any remaining nodes
+ while ( elem.firstChild ) {
+ elem.removeChild( elem.firstChild );
+ }
+ }
+
+ return this;
+ },
+
+ clone: function( dataAndEvents, deepDataAndEvents ) {
+ dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
+ deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
+
+ return this.map( function () {
+ return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
+ });
+ },
+
+ html: function( value ) {
+ return jQuery.access( this, function( value ) {
+ var elem = this[0] || {},
+ i = 0,
+ l = this.length;
+
+ if ( value === undefined ) {
+ return elem.nodeType === 1 ?
+ elem.innerHTML.replace( rinlinejQuery, "" ) :
+ undefined;
+ }
+
+ // See if we can take a shortcut and just use innerHTML
+ if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
+ ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) &&
+ ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) &&
+ !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) {
+
+ value = value.replace( rxhtmlTag, "<$1></$2>" );
+
+ try {
+ for (; i < l; i++ ) {
+ // Remove element nodes and prevent memory leaks
+ elem = this[i] || {};
+ if ( elem.nodeType === 1 ) {
+ jQuery.cleanData( elem.getElementsByTagName( "*" ) );
+ elem.innerHTML = value;
+ }
+ }
+
+ elem = 0;
+
+ // If using innerHTML throws an exception, use the fallback method
+ } catch(e) {}
+ }
+
+ if ( elem ) {
+ this.empty().append( value );
+ }
+ }, null, value, arguments.length );
+ },
+
+ replaceWith: function( value ) {
+ if ( !isDisconnected( this[0] ) ) {
+ // Make sure that the elements are removed from the DOM before they are inserted
+ // this can help fix replacing a parent with child elements
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function(i) {
+ var self = jQuery(this), old = self.html();
+ self.replaceWith( value.call( this, i, old ) );
+ });
+ }
+
+ if ( typeof value !== "string" ) {
+ value = jQuery( value ).detach();
+ }
+
+ return this.each(function() {
+ var next = this.nextSibling,
+ parent = this.parentNode;
+
+ jQuery( this ).remove();
+
+ if ( next ) {
+ jQuery(next).before( value );
+ } else {
+ jQuery(parent).append( value );
+ }
+ });
+ }
+
+ return this.length ?
+ this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value ) :
+ this;
+ },
+
+ detach: function( selector ) {
+ return this.remove( selector, true );
+ },
+
+ domManip: function( args, table, callback ) {
+
+ // Flatten any nested arrays
+ args = [].concat.apply( [], args );
+
+ var results, first, fragment, iNoClone,
+ i = 0,
+ value = args[0],
+ scripts = [],
+ l = this.length;
+
+ // WebKit loses the checked state of inputs when fragments are cloned,
+ // so with multiple targets we process each element separately
+ if ( !jQuery.support.checkClone && l > 1 && typeof value === "string" && rchecked.test( value ) ) {
+ return this.each(function() {
+ jQuery(this).domManip( args, table, callback );
+ });
+ }
+
+ if ( jQuery.isFunction(value) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ args[0] = value.call( this, i, table ? self.html() : undefined );
+ self.domManip( args, table, callback );
+ });
+ }
+
+ if ( this[0] ) {
+ results = jQuery.buildFragment( args, this, scripts );
+ fragment = results.fragment;
+ first = fragment.firstChild;
+
+ if ( fragment.childNodes.length === 1 ) {
+ fragment = first;
+ }
+
+ if ( first ) {
+ table = table && jQuery.nodeName( first, "tr" );
+
+ // Use the original fragment for the last item instead of the first because it can end up
+ // being emptied incorrectly in certain situations (#8070).
+ // Fragments from the fragment cache must always be cloned and never used in place.
+ for ( iNoClone = results.cacheable || l - 1; i < l; i++ ) {
+ callback.call(
+ table && jQuery.nodeName( this[i], "table" ) ?
+ findOrAppend( this[i], "tbody" ) :
+ this[i],
+ i === iNoClone ?
+ fragment :
+ jQuery.clone( fragment, true, true )
+ );
+ }
+ }
+
+ // Fix #11809: Avoid leaking memory
+ fragment = first = null;
+
+ if ( scripts.length ) {
+ jQuery.each( scripts, function( i, elem ) {
+ if ( elem.src ) {
+ if ( jQuery.ajax ) {
+ jQuery.ajax({
+ url: elem.src,
+ type: "GET",
+ dataType: "script",
+ async: false,
+ global: false,
+ "throws": true
+ });
+ } else {
+ jQuery.error("no ajax");
+ }
+ } else {
+ jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "" ) );
+ }
+
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+ });
+ }
+ }
+
+ return this;
+ }
+});
+
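+// Returns the first descendant with the given tag name, creating and
+// appending one when absent (used to target a table's tbody)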
+function findOrAppend( elem, tag ) {
+ return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) );
+}
+
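+// Re-binds events and copies jQuery's internal data from src onto its
+// clone so handlers and .data() survive cloning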
+function cloneCopyEvent( src, dest ) {
+
+ if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) {
+ return;
+ }
+
+ var type, i, l,
+ oldData = jQuery._data( src ),
+ curData = jQuery._data( dest, oldData ),
+ events = oldData.events;
+
+ if ( events ) {
+ delete curData.handle;
+ curData.events = {};
+
+ for ( type in events ) {
+ for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+ jQuery.event.add( dest, type, events[ type ][ i ] );
+ }
+ }
+ }
+
+ // make the cloned public data object a copy of the original
+ if ( curData.data ) {
+ curData.data = jQuery.extend( {}, curData.data );
+ }
+}
+
+function cloneFixAttributes( src, dest ) {
+ var nodeName;
+
+ // We do not need to do anything for non-Elements
+ if ( dest.nodeType !== 1 ) {
+ return;
+ }
+
+ // clearAttributes removes the attributes, which we don't want,
+ // but also removes the attachEvent events, which we *do* want
+ if ( dest.clearAttributes ) {
+ dest.clearAttributes();
+ }
+
+ // mergeAttributes, in contrast, only merges back on the
+ // original attributes, not the events
+ if ( dest.mergeAttributes ) {
+ dest.mergeAttributes( src );
+ }
+
+ nodeName = dest.nodeName.toLowerCase();
+
+ if ( nodeName === "object" ) {
+ // IE6-10 improperly clones children of object elements using classid.
+ // IE10 throws NoModificationAllowedError if parent is null, #12132.
+ if ( dest.parentNode ) {
+ dest.outerHTML = src.outerHTML;
+ }
+
+ // This path appears unavoidable for IE9. When cloning an object
+ // element in IE9, the outerHTML strategy above is not sufficient.
+ // If the src has innerHTML and the destination does not,
+ // copy the src.innerHTML into the dest.innerHTML. #10324
+ if ( jQuery.support.html5Clone && (src.innerHTML && !jQuery.trim(dest.innerHTML)) ) {
+ dest.innerHTML = src.innerHTML;
+ }
+
+ } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
+ // IE6-8 fails to persist the checked state of a cloned checkbox
+ // or radio button. Worse, IE6-7 fail to give the cloned element
+ // a checked appearance if the defaultChecked value isn't also set
+
+ dest.defaultChecked = dest.checked = src.checked;
+
+ // IE6-7 get confused and end up setting the value of a cloned
+ // checkbox/radio button to an empty string instead of "on"
+ if ( dest.value !== src.value ) {
+ dest.value = src.value;
+ }
+
+ // IE6-8 fails to return the selected option to the default selected
+ // state when cloning options
+ } else if ( nodeName === "option" ) {
+ dest.selected = src.defaultSelected;
+
+ // IE6-8 fails to set the defaultValue to the correct value when
+ // cloning other types of input fields
+ } else if ( nodeName === "input" || nodeName === "textarea" ) {
+ dest.defaultValue = src.defaultValue;
+
+ // IE blanks contents when cloning scripts
+ } else if ( nodeName === "script" && dest.text !== src.text ) {
+ dest.text = src.text;
+ }
+
+ // Event data gets referenced instead of copied if the expando
+ // gets copied too
+ dest.removeAttribute( jQuery.expando );
+}
+
+jQuery.buildFragment = function( args, context, scripts ) {
+ var fragment, cacheable, cachehit,
+ first = args[ 0 ];
+
+ // Set context from what may come in as undefined or a jQuery collection or a node
+ // Updated to fix #12266 where accessing context[0] could throw an exception in IE9/10 &
+ // also doubles as fix for #8950 where plain objects caused createDocumentFragment exception
+ context = context || document;
+ context = !context.nodeType && context[0] || context;
+ context = context.ownerDocument || context;
+
+ // Only cache "small" (1/2 KB) HTML strings that are associated with the main document
+ // Cloning options loses the selected state, so don't cache them
+ // IE 6 doesn't like it when you put <object> or <embed> elements in a fragment
+ // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache
+ // Lastly, IE6,7,8 will not correctly reuse cached fragments that were created from unknown elems #10501
+ if ( args.length === 1 && typeof first === "string" && first.length < 512 && context === document &&
+ first.charAt(0) === "<" && !rnocache.test( first ) &&
+ (jQuery.support.checkClone || !rchecked.test( first )) &&
+ (jQuery.support.html5Clone || !rnoshimcache.test( first )) ) {
+
+ // Mark cacheable and look for a hit
+ cacheable = true;
+ fragment = jQuery.fragments[ first ];
+ cachehit = fragment !== undefined;
+ }
+
+ if ( !fragment ) {
+ fragment = context.createDocumentFragment();
+ jQuery.clean( args, context, fragment, scripts );
+
+ // Update the cache, but only store false
+ // unless this is a second parsing of the same content
+ if ( cacheable ) {
+ jQuery.fragments[ first ] = cachehit && fragment;
+ }
+ }
+
+ return { fragment: fragment, cacheable: cacheable };
+};
+
+jQuery.fragments = {};
+
+jQuery.each({
+ appendTo: "append",
+ prependTo: "prepend",
+ insertBefore: "before",
+ insertAfter: "after",
+ replaceAll: "replaceWith"
+}, function( name, original ) {
+ jQuery.fn[ name ] = function( selector ) {
+ var elems,
+ i = 0,
+ ret = [],
+ insert = jQuery( selector ),
+ l = insert.length,
+ parent = this.length === 1 && this[0].parentNode;
+
+ if ( (parent == null || parent && parent.nodeType === 11 && parent.childNodes.length === 1) && l === 1 ) {
+ insert[ original ]( this[0] );
+ return this;
+ } else {
+ for ( ; i < l; i++ ) {
+ elems = ( i > 0 ? this.clone(true) : this ).get();
+ jQuery( insert[i] )[ original ]( elems );
+ ret = ret.concat( elems );
+ }
+
+ return this.pushStack( ret, name, insert.selector );
+ }
+ };
+});
+
+function getAll( elem ) {
+ if ( typeof elem.getElementsByTagName !== "undefined" ) {
+ return elem.getElementsByTagName( "*" );
+
+ } else if ( typeof elem.querySelectorAll !== "undefined" ) {
+ return elem.querySelectorAll( "*" );
+
+ } else {
+ return [];
+ }
+}
+
+// Used in clean, fixes the defaultChecked property
+function fixDefaultChecked( elem ) {
+ if ( rcheckableType.test( elem.type ) ) {
+ elem.defaultChecked = elem.checked;
+ }
+}
+
+jQuery.extend({
+ clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+ var srcElements,
+ destElements,
+ i,
+ clone;
+
+ if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) {
+ clone = elem.cloneNode( true );
+
+ // IE<=8 does not properly clone detached, unknown element nodes
+ } else {
+ fragmentDiv.innerHTML = elem.outerHTML;
+ fragmentDiv.removeChild( clone = fragmentDiv.firstChild );
+ }
+
+ if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) &&
+ (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) {
+ // IE copies events bound via attachEvent when using cloneNode.
+ // Calling detachEvent on the clone will also remove the events
+ // from the original. In order to get around this, we use some
+ // proprietary methods to clear the events. Thanks to MooTools
+ // guys for this hotness.
+
+ cloneFixAttributes( elem, clone );
+
+ // Using Sizzle here is crazy slow, so we use getElementsByTagName instead
+ srcElements = getAll( elem );
+ destElements = getAll( clone );
+
+ // Weird iteration because IE will replace the length property
+ // with an element if you are cloning the body and one of the
+ // elements on the page has a name or id of "length"
+ for ( i = 0; srcElements[i]; ++i ) {
+ // Ensure that the destination node is not null; Fixes #9587
+ if ( destElements[i] ) {
+ cloneFixAttributes( srcElements[i], destElements[i] );
+ }
+ }
+ }
+
+ // Copy the events from the original to the clone
+ if ( dataAndEvents ) {
+ cloneCopyEvent( elem, clone );
+
+ if ( deepDataAndEvents ) {
+ srcElements = getAll( elem );
+ destElements = getAll( clone );
+
+ for ( i = 0; srcElements[i]; ++i ) {
+ cloneCopyEvent( srcElements[i], destElements[i] );
+ }
+ }
+ }
+
+ srcElements = destElements = null;
+
+ // Return the cloned set
+ return clone;
+ },
+
+ clean: function( elems, context, fragment, scripts ) {
+ var i, j, elem, tag, wrap, depth, div, hasBody, tbody, len, handleScript, jsTags,
+ safe = context === document && safeFragment,
+ ret = [];
+
+ // Ensure that context is a document
+ if ( !context || typeof context.createDocumentFragment === "undefined" ) {
+ context = document;
+ }
+
+ // Use the already-created safe fragment if context permits
+ for ( i = 0; (elem = elems[i]) != null; i++ ) {
+ if ( typeof elem === "number" ) {
+ elem += "";
+ }
+
+ if ( !elem ) {
+ continue;
+ }
+
+ // Convert html string into DOM nodes
+ if ( typeof elem === "string" ) {
+ if ( !rhtml.test( elem ) ) {
+ elem = context.createTextNode( elem );
+ } else {
+ // Ensure a safe container in which to render the html
+ safe = safe || createSafeFragment( context );
+ div = context.createElement("div");
+ safe.appendChild( div );
+
+ // Fix "XHTML"-style tags in all browsers
+ elem = elem.replace(rxhtmlTag, "<$1></$2>");
+
+ // Go to html and back, then peel off extra wrappers
+ tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase();
+ wrap = wrapMap[ tag ] || wrapMap._default;
+ depth = wrap[0];
+ div.innerHTML = wrap[1] + elem + wrap[2];
+
+ // Move to the right depth
+ while ( depth-- ) {
+ div = div.lastChild;
+ }
+
+ // Remove IE's autoinserted <tbody> from table fragments
+ if ( !jQuery.support.tbody ) {
+
+ // String was a <table>, *may* have spurious <tbody>
+ hasBody = rtbody.test(elem);
+ tbody = tag === "table" && !hasBody ?
+ div.firstChild && div.firstChild.childNodes :
+
+ // String was a bare <thead> or <tfoot>
+ wrap[1] === "<table>" && !hasBody ?
+ div.childNodes :
+ [];
+
+ for ( j = tbody.length - 1; j >= 0 ; --j ) {
+ if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) {
+ tbody[ j ].parentNode.removeChild( tbody[ j ] );
+ }
+ }
+ }
+
+ // IE completely kills leading whitespace when innerHTML is used
+ if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) {
+ div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild );
+ }
+
+ elem = div.childNodes;
+
+ // Take out of fragment container (we need a fresh div each time)
+ div.parentNode.removeChild( div );
+ }
+ }
+
+ if ( elem.nodeType ) {
+ ret.push( elem );
+ } else {
+ jQuery.merge( ret, elem );
+ }
+ }
+
+ // Fix #11356: Clear elements from safeFragment
+ if ( div ) {
+ elem = div = safe = null;
+ }
+
+ // Reset defaultChecked for any radios and checkboxes
+ // about to be appended to the DOM in IE 6/7 (#8060)
+ if ( !jQuery.support.appendChecked ) {
+ for ( i = 0; (elem = ret[i]) != null; i++ ) {
+ if ( jQuery.nodeName( elem, "input" ) ) {
+ fixDefaultChecked( elem );
+ } else if ( typeof elem.getElementsByTagName !== "undefined" ) {
+ jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked );
+ }
+ }
+ }
+
+ // Append elements to a provided document fragment
+ if ( fragment ) {
+ // Special handling of each script element
+ handleScript = function( elem ) {
+ // Check if we consider it executable
+ if ( !elem.type || rscriptType.test( elem.type ) ) {
+ // Detach the script and store it in the scripts array (if provided) or the fragment
+ // Return truthy to indicate that it has been handled
+ return scripts ?
+ scripts.push( elem.parentNode ? elem.parentNode.removeChild( elem ) : elem ) :
+ fragment.appendChild( elem );
+ }
+ };
+
+ for ( i = 0; (elem = ret[i]) != null; i++ ) {
+ // Check if we're done after handling an executable script
+ if ( !( jQuery.nodeName( elem, "script" ) && handleScript( elem ) ) ) {
+ // Append to fragment and handle embedded scripts
+ fragment.appendChild( elem );
+ if ( typeof elem.getElementsByTagName !== "undefined" ) {
+ // handleScript alters the DOM, so use jQuery.merge to ensure snapshot iteration
+ jsTags = jQuery.grep( jQuery.merge( [], elem.getElementsByTagName("script") ), handleScript );
+
+ // Splice the scripts into ret after their former ancestor and advance our index beyond them
+ ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) );
+ i += jsTags.length;
+ }
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ cleanData: function( elems, /* internal */ acceptData ) {
+ var data, id, elem, type,
+ i = 0,
+ internalKey = jQuery.expando,
+ cache = jQuery.cache,
+ deleteExpando = jQuery.support.deleteExpando,
+ special = jQuery.event.special;
+
+ for ( ; (elem = elems[i]) != null; i++ ) {
+
+ if ( acceptData || jQuery.acceptData( elem ) ) {
+
+ id = elem[ internalKey ];
+ data = id && cache[ id ];
+
+ if ( data ) {
+ if ( data.events ) {
+ for ( type in data.events ) {
+ if ( special[ type ] ) {
+ jQuery.event.remove( elem, type );
+
+ // This is a shortcut to avoid jQuery.event.remove's overhead
+ } else {
+ jQuery.removeEvent( elem, type, data.handle );
+ }
+ }
+ }
+
+ // Remove cache only if it was not already removed by jQuery.event.remove
+ if ( cache[ id ] ) {
+
+ delete cache[ id ];
+
+ // IE does not allow us to delete expando properties from nodes,
+ // nor does it have a removeAttribute function on Document nodes;
+ // we must handle all of these cases
+ if ( deleteExpando ) {
+ delete elem[ internalKey ];
+
+ } else if ( elem.removeAttribute ) {
+ elem.removeAttribute( internalKey );
+
+ } else {
+ elem[ internalKey ] = null;
+ }
+
+ jQuery.deletedIds.push( id );
+ }
+ }
+ }
+ }
+ }
+});
+// Limit scope pollution from any deprecated API
+(function() {
+
+var matched, browser;
+
+// Use of jQuery.browser is frowned upon.
+// More details: http://api.jquery.com/jQuery.browser
+// jQuery.uaMatch maintained for back-compat
+jQuery.uaMatch = function( ua ) {
+ ua = ua.toLowerCase();
+
+ var match = /(chrome)[ \/]([\w.]+)/.exec( ua ) ||
+ /(webkit)[ \/]([\w.]+)/.exec( ua ) ||
+ /(opera)(?:.*version|)[ \/]([\w.]+)/.exec( ua ) ||
+ /(msie) ([\w.]+)/.exec( ua ) ||
+ ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec( ua ) ||
+ [];
+
+ return {
+ browser: match[ 1 ] || "",
+ version: match[ 2 ] || "0"
+ };
+};
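+
+// Illustrative example (editor's sketch, UA string abridged): a Chrome
+// user agent containing "chrome/18.0.1025" yields
+//   jQuery.uaMatch( navigator.userAgent )
+//   // => { browser: "chrome", version: "18.0.1025" }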
+
+matched = jQuery.uaMatch( navigator.userAgent );
+browser = {};
+
+if ( matched.browser ) {
+ browser[ matched.browser ] = true;
+ browser.version = matched.version;
+}
+
+// Chrome is Webkit, but Webkit is also Safari.
+if ( browser.chrome ) {
+ browser.webkit = true;
+} else if ( browser.webkit ) {
+ browser.safari = true;
+}
+
+jQuery.browser = browser;
+
+jQuery.sub = function() {
+ function jQuerySub( selector, context ) {
+ return new jQuerySub.fn.init( selector, context );
+ }
+ jQuery.extend( true, jQuerySub, this );
+ jQuerySub.superclass = this;
+ jQuerySub.fn = jQuerySub.prototype = this();
+ jQuerySub.fn.constructor = jQuerySub;
+ jQuerySub.sub = this.sub;
+ jQuerySub.fn.init = function init( selector, context ) {
+ if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) {
+ context = jQuerySub( context );
+ }
+
+ return jQuery.fn.init.call( this, selector, context, rootjQuerySub );
+ };
+ jQuerySub.fn.init.prototype = jQuerySub.fn;
+ var rootjQuerySub = jQuerySub(document);
+ return jQuerySub;
+};
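+
+// Illustrative usage (names hypothetical): jQuery.sub() creates a
+// sandboxed copy, so plugins attached to it do not leak onto the
+// global jQuery object.
+//   var sub$ = jQuery.sub();
+//   sub$.fn.myPlugin = function() { return this.addClass( "mine" ); };
+//   sub$( "div" ).myPlugin();   // available on the copy only;
+//                               // jQuery.fn.myPlugin stays undefined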
+
+})();
+var curCSS, iframe, iframeDoc,
+ ralpha = /alpha\([^)]*\)/i,
+ ropacity = /opacity=([^)]*)/,
+ rposition = /^(top|right|bottom|left)$/,
+ // swappable if display is none or starts with table except "table", "table-cell", or "table-caption"
+ // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
+ rdisplayswap = /^(none|table(?!-c[ea]).+)/,
+ rmargin = /^margin/,
+ rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ),
+ rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ),
+ rrelNum = new RegExp( "^([-+])=(" + core_pnum + ")", "i" ),
+ elemdisplay = { BODY: "block" },
+
+ cssShow = { position: "absolute", visibility: "hidden", display: "block" },
+ cssNormalTransform = {
+ letterSpacing: 0,
+ fontWeight: 400
+ },
+
+ cssExpand = [ "Top", "Right", "Bottom", "Left" ],
+ cssPrefixes = [ "Webkit", "O", "Moz", "ms" ],
+
+ eventsToggle = jQuery.fn.toggle;
+
+// return a css property mapped to a potentially vendor prefixed property
+function vendorPropName( style, name ) {
+
+ // shortcut for names that are not vendor prefixed
+ if ( name in style ) {
+ return name;
+ }
+
+ // check for vendor prefixed names
+ var capName = name.charAt(0).toUpperCase() + name.slice(1),
+ origName = name,
+ i = cssPrefixes.length;
+
+ while ( i-- ) {
+ name = cssPrefixes[ i ] + capName;
+ if ( name in style ) {
+ return name;
+ }
+ }
+
+ return origName;
+}
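+
+// Illustrative example: on an engine that only implements the prefixed
+// property, vendorPropName( elem.style, "transform" ) would return
+// "WebkitTransform" (or "MozTransform", etc.); when the unprefixed name
+// already exists in the style object it is returned unchanged.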
+
+function isHidden( elem, el ) {
+ elem = el || elem;
+ return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem );
+}
+
+function showHide( elements, show ) {
+ var elem, display,
+ values = [],
+ index = 0,
+ length = elements.length;
+
+ for ( ; index < length; index++ ) {
+ elem = elements[ index ];
+ if ( !elem.style ) {
+ continue;
+ }
+ values[ index ] = jQuery._data( elem, "olddisplay" );
+ if ( show ) {
+ // Reset the inline display of this element to learn if it is
+ // being hidden by cascaded rules or not
+ if ( !values[ index ] && elem.style.display === "none" ) {
+ elem.style.display = "";
+ }
+
+ // Set elements which have been overridden with display: none
+ // in a stylesheet to whatever the default browser style is
+ // for such an element
+ if ( elem.style.display === "" && isHidden( elem ) ) {
+ values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) );
+ }
+ } else {
+ display = curCSS( elem, "display" );
+
+ if ( !values[ index ] && display !== "none" ) {
+ jQuery._data( elem, "olddisplay", display );
+ }
+ }
+ }
+
+ // Set the display of most of the elements in a second loop
+ // to avoid the constant reflow
+ for ( index = 0; index < length; index++ ) {
+ elem = elements[ index ];
+ if ( !elem.style ) {
+ continue;
+ }
+ if ( !show || elem.style.display === "none" || elem.style.display === "" ) {
+ elem.style.display = show ? values[ index ] || "" : "none";
+ }
+ }
+
+ return elements;
+}
+
+jQuery.fn.extend({
+ css: function( name, value ) {
+ return jQuery.access( this, function( elem, name, value ) {
+ return value !== undefined ?
+ jQuery.style( elem, name, value ) :
+ jQuery.css( elem, name );
+ }, name, value, arguments.length > 1 );
+ },
+ show: function() {
+ return showHide( this, true );
+ },
+ hide: function() {
+ return showHide( this );
+ },
+ toggle: function( state, fn2 ) {
+ var bool = typeof state === "boolean";
+
+ if ( jQuery.isFunction( state ) && jQuery.isFunction( fn2 ) ) {
+ return eventsToggle.apply( this, arguments );
+ }
+
+ return this.each(function() {
+ if ( bool ? state : isHidden( this ) ) {
+ jQuery( this ).show();
+ } else {
+ jQuery( this ).hide();
+ }
+ });
+ }
+});
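+
+// Illustrative usage (selector hypothetical):
+//   jQuery( "#box" ).css( "width" );        // read computed value, e.g. "100px"
+//   jQuery( "#box" ).css( "width", 200 );   // plain numbers get "px" appended
+//   jQuery( "#box" ).hide().show();         // show() restores the cascaded
+//                                           // display kept in "olddisplay"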
+
+jQuery.extend({
+ // Add in style property hooks for overriding the default
+ // behavior of getting and setting a style property
+ cssHooks: {
+ opacity: {
+ get: function( elem, computed ) {
+ if ( computed ) {
+ // We should always get a number back from opacity
+ var ret = curCSS( elem, "opacity" );
+ return ret === "" ? "1" : ret;
+
+ }
+ }
+ }
+ },
+
+ // Exclude the following css properties to add px
+ cssNumber: {
+ "fillOpacity": true,
+ "fontWeight": true,
+ "lineHeight": true,
+ "opacity": true,
+ "orphans": true,
+ "widows": true,
+ "zIndex": true,
+ "zoom": true
+ },
+
+ // Add in properties whose names you wish to fix before
+ // setting or getting the value
+ cssProps: {
+ // normalize float css property
+ "float": jQuery.support.cssFloat ? "cssFloat" : "styleFloat"
+ },
+
+ // Get and set the style property on a DOM Node
+ style: function( elem, name, value, extra ) {
+ // Don't set styles on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
+ return;
+ }
+
+ // Make sure that we're working with the right name
+ var ret, type, hooks,
+ origName = jQuery.camelCase( name ),
+ style = elem.style;
+
+ name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) );
+
+ // gets hook for the prefixed version
+ // followed by the unprefixed version
+ hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
+
+ // Check if we're setting a value
+ if ( value !== undefined ) {
+ type = typeof value;
+
+ // convert relative number strings (+= or -=) to relative numbers. #7345
+ if ( type === "string" && (ret = rrelNum.exec( value )) ) {
+ value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) );
+ // Fixes bug #9237
+ type = "number";
+ }
+
+ // Make sure that NaN and null values aren't set. See: #7116
+ if ( value == null || type === "number" && isNaN( value ) ) {
+ return;
+ }
+
+			// If a number was passed in, add 'px' to the value (except for certain CSS properties)
+ if ( type === "number" && !jQuery.cssNumber[ origName ] ) {
+ value += "px";
+ }
+
+ // If a hook was provided, use that value, otherwise just set the specified value
+ if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) {
+ // Wrapped to prevent IE from throwing errors when 'invalid' values are provided
+ // Fixes bug #5509
+ try {
+ style[ name ] = value;
+ } catch(e) {}
+ }
+
+ } else {
+ // If a hook was provided get the non-computed value from there
+ if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) {
+ return ret;
+ }
+
+ // Otherwise just get the value from the style object
+ return style[ name ];
+ }
+ },
+
+ css: function( elem, name, numeric, extra ) {
+ var val, num, hooks,
+ origName = jQuery.camelCase( name );
+
+ // Make sure that we're working with the right name
+ name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) );
+
+ // gets hook for the prefixed version
+ // followed by the unprefixed version
+ hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
+
+ // If a hook was provided get the computed value from there
+ if ( hooks && "get" in hooks ) {
+ val = hooks.get( elem, true, extra );
+ }
+
+ // Otherwise, if a way to get the computed value exists, use that
+ if ( val === undefined ) {
+ val = curCSS( elem, name );
+ }
+
+ //convert "normal" to computed value
+ if ( val === "normal" && name in cssNormalTransform ) {
+ val = cssNormalTransform[ name ];
+ }
+
+ // Return, converting to number if forced or a qualifier was provided and val looks numeric
+ if ( numeric || extra !== undefined ) {
+ num = parseFloat( val );
+ return numeric || jQuery.isNumeric( num ) ? num || 0 : val;
+ }
+ return val;
+ },
+
+ // A method for quickly swapping in/out CSS properties to get correct calculations
+ swap: function( elem, options, callback ) {
+ var ret, name,
+ old = {};
+
+ // Remember the old values, and insert the new ones
+ for ( name in options ) {
+ old[ name ] = elem.style[ name ];
+ elem.style[ name ] = options[ name ];
+ }
+
+ ret = callback.call( elem );
+
+ // Revert the old values
+ for ( name in options ) {
+ elem.style[ name ] = old[ name ];
+ }
+
+ return ret;
+ }
+});
+
+// NOTE: To any future maintainer, we check for window.getComputedStyle
+// because jsdom on node.js will break without it.
+if ( window.getComputedStyle ) {
+ curCSS = function( elem, name ) {
+ var ret, width, minWidth, maxWidth,
+ computed = window.getComputedStyle( elem, null ),
+ style = elem.style;
+
+ if ( computed ) {
+
+ // getPropertyValue is only needed for .css('filter') in IE9, see #12537
+ ret = computed.getPropertyValue( name ) || computed[ name ];
+
+ if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) {
+ ret = jQuery.style( elem, name );
+ }
+
+ // A tribute to the "awesome hack by Dean Edwards"
+ // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right
+ // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels
+ // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values
+ if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) {
+ width = style.width;
+ minWidth = style.minWidth;
+ maxWidth = style.maxWidth;
+
+ style.minWidth = style.maxWidth = style.width = ret;
+ ret = computed.width;
+
+ style.width = width;
+ style.minWidth = minWidth;
+ style.maxWidth = maxWidth;
+ }
+ }
+
+ return ret;
+ };
+} else if ( document.documentElement.currentStyle ) {
+ curCSS = function( elem, name ) {
+ var left, rsLeft,
+ ret = elem.currentStyle && elem.currentStyle[ name ],
+ style = elem.style;
+
+ // Avoid setting ret to empty string here
+ // so we don't default to auto
+ if ( ret == null && style && style[ name ] ) {
+ ret = style[ name ];
+ }
+
+ // From the awesome hack by Dean Edwards
+ // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291
+
+ // If we're not dealing with a regular pixel number
+ // but a number that has a weird ending, we need to convert it to pixels
+ // but not position css attributes, as those are proportional to the parent element instead
+ // and we can't measure the parent instead because it might trigger a "stacking dolls" problem
+ if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) {
+
+ // Remember the original values
+ left = style.left;
+ rsLeft = elem.runtimeStyle && elem.runtimeStyle.left;
+
+ // Put in the new values to get a computed value out
+ if ( rsLeft ) {
+ elem.runtimeStyle.left = elem.currentStyle.left;
+ }
+ style.left = name === "fontSize" ? "1em" : ret;
+ ret = style.pixelLeft + "px";
+
+ // Revert the changed values
+ style.left = left;
+ if ( rsLeft ) {
+ elem.runtimeStyle.left = rsLeft;
+ }
+ }
+
+ return ret === "" ? "auto" : ret;
+ };
+}
+
+function setPositiveNumber( elem, value, subtract ) {
+ var matches = rnumsplit.exec( value );
+ return matches ?
+ Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) :
+ value;
+}
+
+function augmentWidthOrHeight( elem, name, extra, isBorderBox ) {
+ var i = extra === ( isBorderBox ? "border" : "content" ) ?
+ // If we already have the right measurement, avoid augmentation
+ 4 :
+ // Otherwise initialize for horizontal or vertical properties
+ name === "width" ? 1 : 0,
+
+ val = 0;
+
+ for ( ; i < 4; i += 2 ) {
+ // both box models exclude margin, so add it if we want it
+ if ( extra === "margin" ) {
+ // we use jQuery.css instead of curCSS here
+ // because of the reliableMarginRight CSS hook!
+ val += jQuery.css( elem, extra + cssExpand[ i ], true );
+ }
+
+ // From this point on we use curCSS for maximum performance (relevant in animations)
+ if ( isBorderBox ) {
+ // border-box includes padding, so remove it if we want content
+ if ( extra === "content" ) {
+ val -= parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0;
+ }
+
+ // at this point, extra isn't border nor margin, so remove border
+ if ( extra !== "margin" ) {
+ val -= parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0;
+ }
+ } else {
+ // at this point, extra isn't content, so add padding
+ val += parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0;
+
+ // at this point, extra isn't content nor padding, so add border
+ if ( extra !== "padding" ) {
+ val += parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0;
+ }
+ }
+ }
+
+ return val;
+}
+
+function getWidthOrHeight( elem, name, extra ) {
+
+ // Start with offset property, which is equivalent to the border-box value
+ var val = name === "width" ? elem.offsetWidth : elem.offsetHeight,
+ valueIsBorderBox = true,
+ isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box";
+
+ // some non-html elements return undefined for offsetWidth, so check for null/undefined
+ // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285
+ // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668
+ if ( val <= 0 || val == null ) {
+ // Fall back to computed then uncomputed css if necessary
+ val = curCSS( elem, name );
+ if ( val < 0 || val == null ) {
+ val = elem.style[ name ];
+ }
+
+ // Computed unit is not pixels. Stop here and return.
+ if ( rnumnonpx.test(val) ) {
+ return val;
+ }
+
+ // we need the check for style in case a browser which returns unreliable values
+ // for getComputedStyle silently falls back to the reliable elem.style
+ valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] );
+
+ // Normalize "", auto, and prepare for extra
+ val = parseFloat( val ) || 0;
+ }
+
+ // use the active box-sizing model to add/subtract irrelevant styles
+ return ( val +
+ augmentWidthOrHeight(
+ elem,
+ name,
+ extra || ( isBorderBox ? "border" : "content" ),
+ valueIsBorderBox
+ )
+ ) + "px";
+}
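+
+// Worked example (illustrative): for a box-sizing:border-box element
+// with width:100px, padding:10px and a 5px border, the call
+// getWidthOrHeight( elem, "width", "content" ) starts from
+// offsetWidth (100), and augmentWidthOrHeight subtracts the horizontal
+// padding (20) and border (10), returning "70px".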
+
+
+// Try to determine the default display value of an element
+function css_defaultDisplay( nodeName ) {
+ if ( elemdisplay[ nodeName ] ) {
+ return elemdisplay[ nodeName ];
+ }
+
+ var elem = jQuery( "<" + nodeName + ">" ).appendTo( document.body ),
+ display = elem.css("display");
+ elem.remove();
+
+ // If the simple way fails,
+ // get element's real default display by attaching it to a temp iframe
+ if ( display === "none" || display === "" ) {
+ // Use the already-created iframe if possible
+ iframe = document.body.appendChild(
+ iframe || jQuery.extend( document.createElement("iframe"), {
+ frameBorder: 0,
+ width: 0,
+ height: 0
+ })
+ );
+
+ // Create a cacheable copy of the iframe document on first call.
+ // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML
+ // document to it; WebKit & Firefox won't allow reusing the iframe document.
+ if ( !iframeDoc || !iframe.createElement ) {
+ iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document;
+ iframeDoc.write("<!doctype html><html><body>");
+ iframeDoc.close();
+ }
+
+ elem = iframeDoc.body.appendChild( iframeDoc.createElement(nodeName) );
+
+ display = curCSS( elem, "display" );
+ document.body.removeChild( iframe );
+ }
+
+ // Store the correct default display
+ elemdisplay[ nodeName ] = display;
+
+ return display;
+}
+
+jQuery.each([ "height", "width" ], function( i, name ) {
+ jQuery.cssHooks[ name ] = {
+ get: function( elem, computed, extra ) {
+ if ( computed ) {
+				// certain elements can have dimension info if we invisibly show them
+				// however, the element must have a current display style that would benefit from this
+ if ( elem.offsetWidth === 0 && rdisplayswap.test( curCSS( elem, "display" ) ) ) {
+ return jQuery.swap( elem, cssShow, function() {
+ return getWidthOrHeight( elem, name, extra );
+ });
+ } else {
+ return getWidthOrHeight( elem, name, extra );
+ }
+ }
+ },
+
+ set: function( elem, value, extra ) {
+ return setPositiveNumber( elem, value, extra ?
+ augmentWidthOrHeight(
+ elem,
+ name,
+ extra,
+ jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box"
+ ) : 0
+ );
+ }
+ };
+});
+
+if ( !jQuery.support.opacity ) {
+ jQuery.cssHooks.opacity = {
+ get: function( elem, computed ) {
+ // IE uses filters for opacity
+ return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ?
+ ( 0.01 * parseFloat( RegExp.$1 ) ) + "" :
+ computed ? "1" : "";
+ },
+
+ set: function( elem, value ) {
+ var style = elem.style,
+ currentStyle = elem.currentStyle,
+ opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "",
+ filter = currentStyle && currentStyle.filter || style.filter || "";
+
+ // IE has trouble with opacity if it does not have layout
+ // Force it by setting the zoom level
+ style.zoom = 1;
+
+ // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652
+ if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" &&
+ style.removeAttribute ) {
+
+ // Setting style.filter to null, "" & " " still leave "filter:" in the cssText
+ // if "filter:" is present at all, clearType is disabled, we want to avoid this
+ // style.removeAttribute is IE Only, but so apparently is this code path...
+ style.removeAttribute( "filter" );
+
+				// if there is no filter style applied in a css rule, we are done
+ if ( currentStyle && !currentStyle.filter ) {
+ return;
+ }
+ }
+
+ // otherwise, set new filter values
+ style.filter = ralpha.test( filter ) ?
+ filter.replace( ralpha, opacity ) :
+ filter + " " + opacity;
+ }
+ };
+}
+
+// These hooks cannot be added until DOM ready because the support test
+// for it is not run until after DOM ready
+jQuery(function() {
+ if ( !jQuery.support.reliableMarginRight ) {
+ jQuery.cssHooks.marginRight = {
+ get: function( elem, computed ) {
+ // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
+ // Work around by temporarily setting element display to inline-block
+ return jQuery.swap( elem, { "display": "inline-block" }, function() {
+ if ( computed ) {
+ return curCSS( elem, "marginRight" );
+ }
+ });
+ }
+ };
+ }
+
+ // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
+ // getComputedStyle returns percent when specified for top/left/bottom/right
+ // rather than make the css module depend on the offset module, we just check for it here
+ if ( !jQuery.support.pixelPosition && jQuery.fn.position ) {
+ jQuery.each( [ "top", "left" ], function( i, prop ) {
+ jQuery.cssHooks[ prop ] = {
+ get: function( elem, computed ) {
+ if ( computed ) {
+ var ret = curCSS( elem, prop );
+ // if curCSS returns percentage, fallback to offset
+ return rnumnonpx.test( ret ) ? jQuery( elem ).position()[ prop ] + "px" : ret;
+ }
+ }
+ };
+ });
+ }
+
+});
+
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.hidden = function( elem ) {
+ return ( elem.offsetWidth === 0 && elem.offsetHeight === 0 ) || (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || curCSS( elem, "display" )) === "none");
+ };
+
+ jQuery.expr.filters.visible = function( elem ) {
+ return !jQuery.expr.filters.hidden( elem );
+ };
+}
+
+// These hooks are used by animate to expand properties
+jQuery.each({
+ margin: "",
+ padding: "",
+ border: "Width"
+}, function( prefix, suffix ) {
+ jQuery.cssHooks[ prefix + suffix ] = {
+ expand: function( value ) {
+ var i,
+
+ // assumes a single number if not a string
+ parts = typeof value === "string" ? value.split(" ") : [ value ],
+ expanded = {};
+
+ for ( i = 0; i < 4; i++ ) {
+ expanded[ prefix + cssExpand[ i ] + suffix ] =
+ parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
+ }
+
+ return expanded;
+ }
+ };
+
+ if ( !rmargin.test( prefix ) ) {
+ jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
+ }
+});
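+
+// Illustrative example: the generated hooks expand CSS shorthand the
+// way the cascade does, so jQuery.cssHooks.margin.expand( "10px 20px" )
+// produces { marginTop: "10px", marginRight: "20px",
+//            marginBottom: "10px", marginLeft: "20px" }.
+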
+var r20 = /%20/g,
+ rbracket = /\[\]$/,
+ rCRLF = /\r?\n/g,
+ rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,
+ rselectTextarea = /^(?:select|textarea)/i;
+
+jQuery.fn.extend({
+ serialize: function() {
+ return jQuery.param( this.serializeArray() );
+ },
+ serializeArray: function() {
+ return this.map(function(){
+ return this.elements ? jQuery.makeArray( this.elements ) : this;
+ })
+ .filter(function(){
+ return this.name && !this.disabled &&
+ ( this.checked || rselectTextarea.test( this.nodeName ) ||
+ rinput.test( this.type ) );
+ })
+ .map(function( i, elem ){
+ var val = jQuery( this ).val();
+
+ return val == null ?
+ null :
+ jQuery.isArray( val ) ?
+ jQuery.map( val, function( val, i ){
+ return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ }) :
+ { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ }).get();
+ }
+});
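+
+// Illustrative example (form contents hypothetical): for a form with
+// <input name="a" value="1"> and a checked checkbox name="b" value="2",
+//   jQuery( "form" ).serializeArray()
+//   // => [ { name: "a", value: "1" }, { name: "b", value: "2" } ]
+//   jQuery( "form" ).serialize()   // => "a=1&b=2"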
+
+//Serialize an array of form elements or a set of
+//key/values into a query string
+jQuery.param = function( a, traditional ) {
+ var prefix,
+ s = [],
+ add = function( key, value ) {
+ // If value is a function, invoke it and return its value
+ value = jQuery.isFunction( value ) ? value() : ( value == null ? "" : value );
+ s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value );
+ };
+
+ // Set traditional to true for jQuery <= 1.3.2 behavior.
+ if ( traditional === undefined ) {
+ traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional;
+ }
+
+ // If an array was passed in, assume that it is an array of form elements.
+ if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
+ // Serialize the form elements
+ jQuery.each( a, function() {
+ add( this.name, this.value );
+ });
+
+ } else {
+ // If traditional, encode the "old" way (the way 1.3.2 or older
+ // did it), otherwise encode params recursively.
+ for ( prefix in a ) {
+ buildParams( prefix, a[ prefix ], traditional, add );
+ }
+ }
+
+ // Return the resulting serialization
+ return s.join( "&" ).replace( r20, "+" );
+};
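+
+// Illustrative example:
+//   jQuery.param({ a: [ 1, 2 ], b: { c: 3 } })
+//   // => "a%5B%5D=1&a%5B%5D=2&b%5Bc%5D=3"  (i.e. "a[]=1&a[]=2&b[c]=3")
+//   jQuery.param({ a: [ 1, 2 ] }, true )    // => "a=1&a=2" (traditional)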
+
+function buildParams( prefix, obj, traditional, add ) {
+ var name;
+
+ if ( jQuery.isArray( obj ) ) {
+ // Serialize array item.
+ jQuery.each( obj, function( i, v ) {
+ if ( traditional || rbracket.test( prefix ) ) {
+ // Treat each array item as a scalar.
+ add( prefix, v );
+
+ } else {
+ // If array item is non-scalar (array or object), encode its
+ // numeric index to resolve deserialization ambiguity issues.
+ // Note that rack (as of 1.0.0) can't currently deserialize
+ // nested arrays properly, and attempting to do so may cause
+ // a server error. Possible fixes are to modify rack's
+ // deserialization algorithm or to provide an option or flag
+ // to force array serialization to be shallow.
+ buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add );
+ }
+ });
+
+ } else if ( !traditional && jQuery.type( obj ) === "object" ) {
+ // Serialize object item.
+ for ( name in obj ) {
+ buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
+ }
+
+ } else {
+ // Serialize scalar item.
+ add( prefix, obj );
+ }
+}
+var
+ // Document location
+ ajaxLocParts,
+ ajaxLocation,
+
+ rhash = /#.*$/,
+ rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL
+ // #7653, #8125, #8152: local protocol detection
+ rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,
+ rnoContent = /^(?:GET|HEAD)$/,
+ rprotocol = /^\/\//,
+ rquery = /\?/,
+ rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
+ rts = /([?&])_=[^&]*/,
+ rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,
+
+ // Keep a copy of the old load method
+ _load = jQuery.fn.load,
+
+ /* Prefilters
+ * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
+ * 2) These are called:
+ * - BEFORE asking for a transport
+ * - AFTER param serialization (s.data is a string if s.processData is true)
+ * 3) key is the dataType
+ * 4) the catchall symbol "*" can be used
+ * 5) execution will start with transport dataType and THEN continue down to "*" if needed
+ */
+ prefilters = {},
+
+ /* Transports bindings
+ * 1) key is the dataType
+ * 2) the catchall symbol "*" can be used
+ * 3) selection will start with transport dataType and THEN go to "*" if needed
+ */
+ transports = {},
+
+ // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
+ allTypes = ["*/"] + ["*"];
+
+// #8138, IE may throw an exception when accessing
+// a field from window.location if document.domain has been set
+try {
+ ajaxLocation = location.href;
+} catch( e ) {
+ // Use the href attribute of an A element
+ // since IE will modify it given document.location
+ ajaxLocation = document.createElement( "a" );
+ ajaxLocation.href = "";
+ ajaxLocation = ajaxLocation.href;
+}
+
+// Segment location into parts
+ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || [];
+
+// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
+function addToPrefiltersOrTransports( structure ) {
+
+ // dataTypeExpression is optional and defaults to "*"
+ return function( dataTypeExpression, func ) {
+
+ if ( typeof dataTypeExpression !== "string" ) {
+ func = dataTypeExpression;
+ dataTypeExpression = "*";
+ }
+
+ var dataType, list, placeBefore,
+ dataTypes = dataTypeExpression.toLowerCase().split( core_rspace ),
+ i = 0,
+ length = dataTypes.length;
+
+ if ( jQuery.isFunction( func ) ) {
+ // For each dataType in the dataTypeExpression
+ for ( ; i < length; i++ ) {
+ dataType = dataTypes[ i ];
+			// Check whether we're asked to add before
+ // any existing element
+ placeBefore = /^\+/.test( dataType );
+ if ( placeBefore ) {
+ dataType = dataType.substr( 1 ) || "*";
+ }
+ list = structure[ dataType ] = structure[ dataType ] || [];
+ // then we add to the structure accordingly
+ list[ placeBefore ? "unshift" : "push" ]( func );
+ }
+ }
+ };
+}
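+
+// Illustrative usage of the functions built from this factory
+// (jQuery.ajaxPrefilter / jQuery.ajaxTransport, attached below): a "+"
+// prefix on the dataType unshifts the handler ahead of existing ones.
+//   jQuery.ajaxPrefilter( "+json", function( options ) {
+//       // runs before previously registered json prefilters
+//   });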
+
+// Base inspection function for prefilters and transports
+function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR,
+ dataType /* internal */, inspected /* internal */ ) {
+
+ dataType = dataType || options.dataTypes[ 0 ];
+ inspected = inspected || {};
+
+ inspected[ dataType ] = true;
+
+ var selection,
+ list = structure[ dataType ],
+ i = 0,
+ length = list ? list.length : 0,
+ executeOnly = ( structure === prefilters );
+
+ for ( ; i < length && ( executeOnly || !selection ); i++ ) {
+ selection = list[ i ]( options, originalOptions, jqXHR );
+ // If we got redirected to another dataType
+ // we try there if executing only and not done already
+ if ( typeof selection === "string" ) {
+ if ( !executeOnly || inspected[ selection ] ) {
+ selection = undefined;
+ } else {
+ options.dataTypes.unshift( selection );
+ selection = inspectPrefiltersOrTransports(
+ structure, options, originalOptions, jqXHR, selection, inspected );
+ }
+ }
+ }
+ // If we're only executing or nothing was selected
+ // we try the catchall dataType if not done already
+ if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) {
+ selection = inspectPrefiltersOrTransports(
+ structure, options, originalOptions, jqXHR, "*", inspected );
+ }
+ // unnecessary when only executing (prefilters)
+ // but it'll be ignored by the caller in that case
+ return selection;
+}
+
+// A special extend for ajax options
+// that takes "flat" options (not to be deep extended)
+// Fixes #9887
+function ajaxExtend( target, src ) {
+ var key, deep,
+ flatOptions = jQuery.ajaxSettings.flatOptions || {};
+ for ( key in src ) {
+ if ( src[ key ] !== undefined ) {
+ ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
+ }
+ }
+ if ( deep ) {
+ jQuery.extend( true, target, deep );
+ }
+}
+
+jQuery.fn.load = function( url, params, callback ) {
+ if ( typeof url !== "string" && _load ) {
+ return _load.apply( this, arguments );
+ }
+
+ // Don't do a request if no elements are being requested
+ if ( !this.length ) {
+ return this;
+ }
+
+ var selector, type, response,
+ self = this,
+ off = url.indexOf(" ");
+
+ if ( off >= 0 ) {
+ selector = url.slice( off, url.length );
+ url = url.slice( 0, off );
+ }
+
+ // If it's a function
+ if ( jQuery.isFunction( params ) ) {
+
+ // We assume that it's the callback
+ callback = params;
+ params = undefined;
+
+ // Otherwise, build a param string
+ } else if ( params && typeof params === "object" ) {
+ type = "POST";
+ }
+
+ // Request the remote document
+ jQuery.ajax({
+ url: url,
+
+ // if "type" variable is undefined, then "GET" method will be used
+ type: type,
+ dataType: "html",
+ data: params,
+ complete: function( jqXHR, status ) {
+ if ( callback ) {
+ self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] );
+ }
+ }
+ }).done(function( responseText ) {
+
+ // Save response for use in complete callback
+ response = arguments;
+
+ // See if a selector was specified
+ self.html( selector ?
+
+ // Create a dummy div to hold the results
+ jQuery("<div>")
+
+ // inject the contents of the document in, removing the scripts
+ // to avoid any 'Permission Denied' errors in IE
+ .append( responseText.replace( rscript, "" ) )
+
+ // Locate the specified elements
+ .find( selector ) :
+
+ // If not, just inject the full result
+ responseText );
+
+ });
+
+ return this;
+};
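+
+// Illustrative usage (URLs hypothetical):
+//   jQuery( "#result" ).load( "ajax/test.html" );             // whole document
+//   jQuery( "#result" ).load( "ajax/test.html #container" );  // fragment only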
+
+// Attach a bunch of functions for handling common AJAX events
+jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){
+ jQuery.fn[ o ] = function( f ){
+ return this.on( o, f );
+ };
+});
+
+jQuery.each( [ "get", "post" ], function( i, method ) {
+ jQuery[ method ] = function( url, data, callback, type ) {
+ // shift arguments if data argument was omitted
+ if ( jQuery.isFunction( data ) ) {
+ type = type || callback;
+ callback = data;
+ data = undefined;
+ }
+
+ return jQuery.ajax({
+ type: method,
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ };
+});
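+
+// Illustrative usage (URLs hypothetical):
+//   jQuery.get( "test.php", { name: "John" }, function( data ) {
+//       // data has been converted according to the inferred dataType
+//   });
+//   jQuery.post( "save.php", { id: 42 }, function( data ) {}, "json" );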
+
+jQuery.extend({
+
+ getScript: function( url, callback ) {
+ return jQuery.get( url, undefined, callback, "script" );
+ },
+
+ getJSON: function( url, data, callback ) {
+ return jQuery.get( url, data, callback, "json" );
+ },
+
+ // Creates a full fledged settings object into target
+ // with both ajaxSettings and settings fields.
+ // If target is omitted, writes into ajaxSettings.
+ ajaxSetup: function( target, settings ) {
+ if ( settings ) {
+ // Building a settings object
+ ajaxExtend( target, jQuery.ajaxSettings );
+ } else {
+ // Extending ajaxSettings
+ settings = target;
+ target = jQuery.ajaxSettings;
+ }
+ ajaxExtend( target, settings );
+ return target;
+ },
+
+ ajaxSettings: {
+ url: ajaxLocation,
+ isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ),
+ global: true,
+ type: "GET",
+ contentType: "application/x-www-form-urlencoded; charset=UTF-8",
+ processData: true,
+ async: true,
+ /*
+ timeout: 0,
+ data: null,
+ dataType: null,
+ username: null,
+ password: null,
+ cache: null,
+ throws: false,
+ traditional: false,
+ headers: {},
+ */
+
+ accepts: {
+ xml: "application/xml, text/xml",
+ html: "text/html",
+ text: "text/plain",
+ json: "application/json, text/javascript",
+ "*": allTypes
+ },
+
+ contents: {
+ xml: /xml/,
+ html: /html/,
+ json: /json/
+ },
+
+ responseFields: {
+ xml: "responseXML",
+ text: "responseText"
+ },
+
+ // List of data converters
+ // 1) key format is "source_type destination_type" (a single space in-between)
+ // 2) the catchall symbol "*" can be used for source_type
+ converters: {
+
+ // Convert anything to text
+ "* text": window.String,
+
+ // Text to html (true = no transformation)
+ "text html": true,
+
+ // Evaluate text as a json expression
+ "text json": jQuery.parseJSON,
+
+ // Parse text as xml
+ "text xml": jQuery.parseXML
+ },
+
+ // For options that shouldn't be deep extended:
+ // you can add your own custom options here if
+ // and when you create one that shouldn't be
+ // deep extended (see ajaxExtend)
+ flatOptions: {
+ context: true,
+ url: true
+ }
+ },
+
+ ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
+ ajaxTransport: addToPrefiltersOrTransports( transports ),
+
+ // Main method
+ ajax: function( url, options ) {
+
+ // If url is an object, simulate pre-1.5 signature
+ if ( typeof url === "object" ) {
+ options = url;
+ url = undefined;
+ }
+
+ // Force options to be an object
+ options = options || {};
+
+ var // ifModified key
+ ifModifiedKey,
+ // Response headers
+ responseHeadersString,
+ responseHeaders,
+ // transport
+ transport,
+ // timeout handle
+ timeoutTimer,
+ // Cross-domain detection vars
+ parts,
+ // To know if global events are to be dispatched
+ fireGlobals,
+ // Loop variable
+ i,
+ // Create the final options object
+ s = jQuery.ajaxSetup( {}, options ),
+ // Callbacks context
+ callbackContext = s.context || s,
+ // Context for global events
+ // It's the callbackContext if one was provided in the options
+ // and if it's a DOM node or a jQuery collection
+ globalEventContext = callbackContext !== s &&
+ ( callbackContext.nodeType || callbackContext instanceof jQuery ) ?
+ jQuery( callbackContext ) : jQuery.event,
+ // Deferreds
+ deferred = jQuery.Deferred(),
+ completeDeferred = jQuery.Callbacks( "once memory" ),
+ // Status-dependent callbacks
+ statusCode = s.statusCode || {},
+ // Headers (they are sent all at once)
+ requestHeaders = {},
+ requestHeadersNames = {},
+ // The jqXHR state
+ state = 0,
+ // Default abort message
+ strAbort = "canceled",
+ // Fake xhr
+ jqXHR = {
+
+ readyState: 0,
+
+ // Caches the header
+ setRequestHeader: function( name, value ) {
+ if ( !state ) {
+ var lname = name.toLowerCase();
+ name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name;
+ requestHeaders[ name ] = value;
+ }
+ return this;
+ },
+
+ // Raw string
+ getAllResponseHeaders: function() {
+ return state === 2 ? responseHeadersString : null;
+ },
+
+ // Builds headers hashtable if needed
+ getResponseHeader: function( key ) {
+ var match;
+ if ( state === 2 ) {
+ if ( !responseHeaders ) {
+ responseHeaders = {};
+ while( ( match = rheaders.exec( responseHeadersString ) ) ) {
+ responseHeaders[ match[1].toLowerCase() ] = match[ 2 ];
+ }
+ }
+ match = responseHeaders[ key.toLowerCase() ];
+ }
+ return match === undefined ? null : match;
+ },
+
+ // Overrides response content-type header
+ overrideMimeType: function( type ) {
+ if ( !state ) {
+ s.mimeType = type;
+ }
+ return this;
+ },
+
+ // Cancel the request
+ abort: function( statusText ) {
+ statusText = statusText || strAbort;
+ if ( transport ) {
+ transport.abort( statusText );
+ }
+ done( 0, statusText );
+ return this;
+ }
+ };
+
+ // Callback for when everything is done
+ // It is defined here because jslint complains if it is declared
+ // at the end of the function (which would be more logical and readable)
+ function done( status, nativeStatusText, responses, headers ) {
+ var isSuccess, success, error, response, modified,
+ statusText = nativeStatusText;
+
+ // Called once
+ if ( state === 2 ) {
+ return;
+ }
+
+ // State is "done" now
+ state = 2;
+
+ // Clear timeout if it exists
+ if ( timeoutTimer ) {
+ clearTimeout( timeoutTimer );
+ }
+
+ // Dereference transport for early garbage collection
+ // (no matter how long the jqXHR object will be used)
+ transport = undefined;
+
+ // Cache response headers
+ responseHeadersString = headers || "";
+
+ // Set readyState
+ jqXHR.readyState = status > 0 ? 4 : 0;
+
+ // Get response data
+ if ( responses ) {
+ response = ajaxHandleResponses( s, jqXHR, responses );
+ }
+
+ // If successful, handle type chaining
+ if ( status >= 200 && status < 300 || status === 304 ) {
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+
+ modified = jqXHR.getResponseHeader("Last-Modified");
+ if ( modified ) {
+ jQuery.lastModified[ ifModifiedKey ] = modified;
+ }
+ modified = jqXHR.getResponseHeader("Etag");
+ if ( modified ) {
+ jQuery.etag[ ifModifiedKey ] = modified;
+ }
+ }
+
+ // If not modified
+ if ( status === 304 ) {
+
+ statusText = "notmodified";
+ isSuccess = true;
+
+ // If we have data
+ } else {
+
+ isSuccess = ajaxConvert( s, response );
+ statusText = isSuccess.state;
+ success = isSuccess.data;
+ error = isSuccess.error;
+ isSuccess = !error;
+ }
+ } else {
+ // We extract error from statusText
+ // then normalize statusText and status for non-aborts
+ error = statusText;
+ if ( !statusText || status ) {
+ statusText = "error";
+ if ( status < 0 ) {
+ status = 0;
+ }
+ }
+ }
+
+ // Set data for the fake xhr object
+ jqXHR.status = status;
+ jqXHR.statusText = ( nativeStatusText || statusText ) + "";
+
+ // Success/Error
+ if ( isSuccess ) {
+ deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
+ } else {
+ deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
+ }
+
+ // Status-dependent callbacks
+ jqXHR.statusCode( statusCode );
+ statusCode = undefined;
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ),
+ [ jqXHR, s, isSuccess ? success : error ] );
+ }
+
+ // Complete
+ completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
+ // Handle the global AJAX counter
+ if ( !( --jQuery.active ) ) {
+ jQuery.event.trigger( "ajaxStop" );
+ }
+ }
+ }
+
+ // Attach deferreds
+ deferred.promise( jqXHR );
+ jqXHR.success = jqXHR.done;
+ jqXHR.error = jqXHR.fail;
+ jqXHR.complete = completeDeferred.add;
+
+ // Status-dependent callbacks
+ jqXHR.statusCode = function( map ) {
+ if ( map ) {
+ var tmp;
+ if ( state < 2 ) {
+ for ( tmp in map ) {
+ statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ];
+ }
+ } else {
+ tmp = map[ jqXHR.status ];
+ jqXHR.always( tmp );
+ }
+ }
+ return this;
+ };
+
+ // Remove hash character (#7531: and string promotion)
+ // Add protocol if not provided (#5866: IE7 issue with protocol-less urls)
+ // We also use the url parameter if available
+ s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" );
+
+ // Extract dataTypes list
+ s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( core_rspace );
+
+ // A cross-domain request is in order when we have a protocol:host:port mismatch
+ if ( s.crossDomain == null ) {
+ parts = rurl.exec( s.url.toLowerCase() );
+ s.crossDomain = !!( parts &&
+ ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] ||
+ ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) !=
+ ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 80 : 443 ) ) )
+ );
+ }
+
+ // Convert data if not already a string
+ if ( s.data && s.processData && typeof s.data !== "string" ) {
+ s.data = jQuery.param( s.data, s.traditional );
+ }
+
+ // Apply prefilters
+ inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
+
+ // If request was aborted inside a prefilter, stop there
+ if ( state === 2 ) {
+ return jqXHR;
+ }
+
+ // We can fire global events as of now if asked to
+ fireGlobals = s.global;
+
+ // Uppercase the type
+ s.type = s.type.toUpperCase();
+
+ // Determine if request has content
+ s.hasContent = !rnoContent.test( s.type );
+
+ // Watch for a new set of requests
+ if ( fireGlobals && jQuery.active++ === 0 ) {
+ jQuery.event.trigger( "ajaxStart" );
+ }
+
+ // More options handling for requests with no content
+ if ( !s.hasContent ) {
+
+ // If data is available, append data to url
+ if ( s.data ) {
+ s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data;
+ // #9682: remove data so that it's not used in an eventual retry
+ delete s.data;
+ }
+
+ // Get ifModifiedKey before adding the anti-cache parameter
+ ifModifiedKey = s.url;
+
+ // Add anti-cache in url if needed
+ if ( s.cache === false ) {
+
+ var ts = jQuery.now(),
+ // try replacing _= if it is there
+ ret = s.url.replace( rts, "$1_=" + ts );
+
+ // if nothing was replaced, add timestamp to the end
+ s.url = ret + ( ( ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" );
+ }
+ }
+
+ // Set the correct header, if data is being sent
+ if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
+ jqXHR.setRequestHeader( "Content-Type", s.contentType );
+ }
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+ ifModifiedKey = ifModifiedKey || s.url;
+ if ( jQuery.lastModified[ ifModifiedKey ] ) {
+ jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] );
+ }
+ if ( jQuery.etag[ ifModifiedKey ] ) {
+ jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] );
+ }
+ }
+
+ // Set the Accepts header for the server, depending on the dataType
+ jqXHR.setRequestHeader(
+ "Accept",
+ s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ?
+ s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
+ s.accepts[ "*" ]
+ );
+
+ // Check for headers option
+ for ( i in s.headers ) {
+ jqXHR.setRequestHeader( i, s.headers[ i ] );
+ }
+
+ // Allow custom headers/mimetypes and early abort
+ if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) {
+ // Abort if not done already and return
+ return jqXHR.abort();
+
+ }
+
+ // aborting is no longer a cancellation
+ strAbort = "abort";
+
+ // Install callbacks on deferreds
+ for ( i in { success: 1, error: 1, complete: 1 } ) {
+ jqXHR[ i ]( s[ i ] );
+ }
+
+ // Get transport
+ transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
+
+ // If no transport, we auto-abort
+ if ( !transport ) {
+ done( -1, "No Transport" );
+ } else {
+ jqXHR.readyState = 1;
+ // Send global event
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
+ }
+ // Timeout
+ if ( s.async && s.timeout > 0 ) {
+ timeoutTimer = setTimeout( function(){
+ jqXHR.abort( "timeout" );
+ }, s.timeout );
+ }
+
+ try {
+ state = 1;
+ transport.send( requestHeaders, done );
+ } catch (e) {
+ // Propagate exception as error if not done
+ if ( state < 2 ) {
+ done( -1, e );
+ // Simply rethrow otherwise
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ return jqXHR;
+ },
+
+ // Counter for holding the number of active queries
+ active: 0,
+
+ // Last-Modified header cache for next request
+ lastModified: {},
+ etag: {}
+
+});
+
+/* Handles responses to an ajax request:
+ * - sets all responseXXX fields accordingly
+ * - finds the right dataType (mediates between content-type and expected dataType)
+ * - returns the corresponding response
+ */
+function ajaxHandleResponses( s, jqXHR, responses ) {
+
+ var ct, type, finalDataType, firstDataType,
+ contents = s.contents,
+ dataTypes = s.dataTypes,
+ responseFields = s.responseFields;
+
+ // Fill responseXXX fields
+ for ( type in responseFields ) {
+ if ( type in responses ) {
+ jqXHR[ responseFields[type] ] = responses[ type ];
+ }
+ }
+
+ // Remove auto dataType and get content-type in the process
+ while( dataTypes[ 0 ] === "*" ) {
+ dataTypes.shift();
+ if ( ct === undefined ) {
+ ct = s.mimeType || jqXHR.getResponseHeader( "content-type" );
+ }
+ }
+
+ // Check if we're dealing with a known content-type
+ if ( ct ) {
+ for ( type in contents ) {
+ if ( contents[ type ] && contents[ type ].test( ct ) ) {
+ dataTypes.unshift( type );
+ break;
+ }
+ }
+ }
+
+ // Check to see if we have a response for the expected dataType
+ if ( dataTypes[ 0 ] in responses ) {
+ finalDataType = dataTypes[ 0 ];
+ } else {
+ // Try convertible dataTypes
+ for ( type in responses ) {
+ if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) {
+ finalDataType = type;
+ break;
+ }
+ if ( !firstDataType ) {
+ firstDataType = type;
+ }
+ }
+ // Or just use first one
+ finalDataType = finalDataType || firstDataType;
+ }
+
+ // If we found a dataType
+ // We add the dataType to the list if needed
+ // and return the corresponding response
+ if ( finalDataType ) {
+ if ( finalDataType !== dataTypes[ 0 ] ) {
+ dataTypes.unshift( finalDataType );
+ }
+ return responses[ finalDataType ];
+ }
+}
+
+// Chain conversions given the request and the original response
+function ajaxConvert( s, response ) {
+
+ var conv, conv2, current, tmp,
+ // Work with a copy of dataTypes in case we need to modify it for conversion
+ dataTypes = s.dataTypes.slice(),
+ prev = dataTypes[ 0 ],
+ converters = {},
+ i = 0;
+
+ // Apply the dataFilter if provided
+ if ( s.dataFilter ) {
+ response = s.dataFilter( response, s.dataType );
+ }
+
+ // Create converters map with lowercased keys
+ if ( dataTypes[ 1 ] ) {
+ for ( conv in s.converters ) {
+ converters[ conv.toLowerCase() ] = s.converters[ conv ];
+ }
+ }
+
+ // Convert to each sequential dataType, tolerating list modification
+ for ( ; (current = dataTypes[++i]); ) {
+
+ // There's only work to do if current dataType is non-auto
+ if ( current !== "*" ) {
+
+ // Convert response if prev dataType is non-auto and differs from current
+ if ( prev !== "*" && prev !== current ) {
+
+ // Seek a direct converter
+ conv = converters[ prev + " " + current ] || converters[ "* " + current ];
+
+ // If none found, seek a pair
+ if ( !conv ) {
+ for ( conv2 in converters ) {
+
+ // If conv2 outputs current
+ tmp = conv2.split(" ");
+ if ( tmp[ 1 ] === current ) {
+
+ // If prev can be converted to accepted input
+ conv = converters[ prev + " " + tmp[ 0 ] ] ||
+ converters[ "* " + tmp[ 0 ] ];
+ if ( conv ) {
+ // Condense equivalence converters
+ if ( conv === true ) {
+ conv = converters[ conv2 ];
+
+ // Otherwise, insert the intermediate dataType
+ } else if ( converters[ conv2 ] !== true ) {
+ current = tmp[ 0 ];
+ dataTypes.splice( i--, 0, current );
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Apply converter (if not an equivalence)
+ if ( conv !== true ) {
+
+ // Unless errors are allowed to bubble, catch and return them
+ if ( conv && s["throws"] ) {
+ response = conv( response );
+ } else {
+ try {
+ response = conv( response );
+ } catch ( e ) {
+ return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current };
+ }
+ }
+ }
+ }
+
+ // Update prev for next iteration
+ prev = current;
+ }
+ }
+
+ return { state: "success", data: response };
+}
+var oldCallbacks = [],
+ rquestion = /\?/,
+ rjsonp = /(=)\?(?=&|$)|\?\?/,
+ nonce = jQuery.now();
+
+// Default jsonp settings
+jQuery.ajaxSetup({
+ jsonp: "callback",
+ jsonpCallback: function() {
+ var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) );
+ this[ callback ] = true;
+ return callback;
+ }
+});
+
+// Detect, normalize options and install callbacks for jsonp requests
+jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
+
+ var callbackName, overwritten, responseContainer,
+ data = s.data,
+ url = s.url,
+ hasCallback = s.jsonp !== false,
+ replaceInUrl = hasCallback && rjsonp.test( url ),
+ replaceInData = hasCallback && !replaceInUrl && typeof data === "string" &&
+ !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") &&
+ rjsonp.test( data );
+
+ // Handle iff the expected data type is "jsonp" or we have a parameter to set
+ if ( s.dataTypes[ 0 ] === "jsonp" || replaceInUrl || replaceInData ) {
+
+ // Get callback name, remembering preexisting value associated with it
+ callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ?
+ s.jsonpCallback() :
+ s.jsonpCallback;
+ overwritten = window[ callbackName ];
+
+ // Insert callback into url or form data
+ if ( replaceInUrl ) {
+ s.url = url.replace( rjsonp, "$1" + callbackName );
+ } else if ( replaceInData ) {
+ s.data = data.replace( rjsonp, "$1" + callbackName );
+ } else if ( hasCallback ) {
+ s.url += ( rquestion.test( url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
+ }
+
+ // Use data converter to retrieve json after script execution
+ s.converters["script json"] = function() {
+ if ( !responseContainer ) {
+ jQuery.error( callbackName + " was not called" );
+ }
+ return responseContainer[ 0 ];
+ };
+
+ // force json dataType
+ s.dataTypes[ 0 ] = "json";
+
+ // Install callback
+ window[ callbackName ] = function() {
+ responseContainer = arguments;
+ };
+
+ // Clean-up function (fires after converters)
+ jqXHR.always(function() {
+ // Restore preexisting value
+ window[ callbackName ] = overwritten;
+
+ // Save back as free
+ if ( s[ callbackName ] ) {
+ // make sure that re-using the options doesn't screw things around
+ s.jsonpCallback = originalSettings.jsonpCallback;
+
+ // save the callback name for future use
+ oldCallbacks.push( callbackName );
+ }
+
+ // Call if it was a function and we have a response
+ if ( responseContainer && jQuery.isFunction( overwritten ) ) {
+ overwritten( responseContainer[ 0 ] );
+ }
+
+ responseContainer = overwritten = undefined;
+ });
+
+ // Delegate to script
+ return "script";
+ }
+});
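+
+// Illustrative usage (URL hypothetical): either form takes the JSONP
+// path above; jQuery generates the callback name, injects it into the
+// url/data, and delegates to the "script" transport.
+//   jQuery.ajax({ url: "http://example.com/api", dataType: "jsonp" });
+//   jQuery.getJSON( "http://example.com/api?callback=?" );
+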
+// Install script dataType
+jQuery.ajaxSetup({
+ accepts: {
+ script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"
+ },
+ contents: {
+ script: /javascript|ecmascript/
+ },
+ converters: {
+ "text script": function( text ) {
+ jQuery.globalEval( text );
+ return text;
+ }
+ }
+});
+
+// Handle cache's special case and global
+jQuery.ajaxPrefilter( "script", function( s ) {
+ if ( s.cache === undefined ) {
+ s.cache = false;
+ }
+ if ( s.crossDomain ) {
+ s.type = "GET";
+ s.global = false;
+ }
+});
+
+// Bind script tag hack transport
+jQuery.ajaxTransport( "script", function(s) {
+
+ // This transport only deals with cross domain requests
+ if ( s.crossDomain ) {
+
+ var script,
+ head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement;
+
+ return {
+
+ send: function( _, callback ) {
+
+ script = document.createElement( "script" );
+
+ script.async = "async";
+
+ if ( s.scriptCharset ) {
+ script.charset = s.scriptCharset;
+ }
+
+ script.src = s.url;
+
+ // Attach handlers for all browsers
+ script.onload = script.onreadystatechange = function( _, isAbort ) {
+
+ if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) {
+
+ // Handle memory leak in IE
+ script.onload = script.onreadystatechange = null;
+
+ // Remove the script
+ if ( head && script.parentNode ) {
+ head.removeChild( script );
+ }
+
+ // Dereference the script
+ script = undefined;
+
+ // Callback if not abort
+ if ( !isAbort ) {
+ callback( 200, "success" );
+ }
+ }
+ };
+ // Use insertBefore instead of appendChild to circumvent an IE6 bug.
+ // This arises when a base node is used (#2709 and #4378).
+ head.insertBefore( script, head.firstChild );
+ },
+
+ abort: function() {
+ if ( script ) {
+ script.onload( 0, 1 );
+ }
+ }
+ };
+ }
+});
+var xhrCallbacks,
+ // #5280: Internet Explorer will keep connections alive if we don't abort on unload
+ xhrOnUnloadAbort = window.ActiveXObject ? function() {
+ // Abort all pending requests
+ for ( var key in xhrCallbacks ) {
+ xhrCallbacks[ key ]( 0, 1 );
+ }
+ } : false,
+ xhrId = 0;
+
+// Functions to create xhrs
+function createStandardXHR() {
+ try {
+ return new window.XMLHttpRequest();
+ } catch( e ) {}
+}
+
+function createActiveXHR() {
+ try {
+ return new window.ActiveXObject( "Microsoft.XMLHTTP" );
+ } catch( e ) {}
+}
+
+// Create the request object
+// (This is still attached to ajaxSettings for backward compatibility)
+jQuery.ajaxSettings.xhr = window.ActiveXObject ?
+ /* Microsoft failed to properly
+ * implement the XMLHttpRequest in IE7 (can't request local files),
+ * so we use the ActiveXObject when it is available
+ * Additionally XMLHttpRequest can be disabled in IE7/IE8 so
+ * we need a fallback.
+ */
+ function() {
+ return !this.isLocal && createStandardXHR() || createActiveXHR();
+ } :
+ // For all other browsers, use the standard XMLHttpRequest object
+ createStandardXHR;
+
+// Determine support properties
+(function( xhr ) {
+ jQuery.extend( jQuery.support, {
+ ajax: !!xhr,
+ cors: !!xhr && ( "withCredentials" in xhr )
+ });
+})( jQuery.ajaxSettings.xhr() );
+
+// Create transport if the browser can provide an xhr
+if ( jQuery.support.ajax ) {
+
+ jQuery.ajaxTransport(function( s ) {
+ // Cross domain only allowed if supported through XMLHttpRequest
+ if ( !s.crossDomain || jQuery.support.cors ) {
+
+ var callback;
+
+ return {
+ send: function( headers, complete ) {
+
+ // Get a new xhr
+ var handle, i,
+ xhr = s.xhr();
+
+ // Open the socket
+ // Passing null username, generates a login popup on Opera (#2865)
+ if ( s.username ) {
+ xhr.open( s.type, s.url, s.async, s.username, s.password );
+ } else {
+ xhr.open( s.type, s.url, s.async );
+ }
+
+ // Apply custom fields if provided
+ if ( s.xhrFields ) {
+ for ( i in s.xhrFields ) {
+ xhr[ i ] = s.xhrFields[ i ];
+ }
+ }
+
+ // Override mime type if needed
+ if ( s.mimeType && xhr.overrideMimeType ) {
+ xhr.overrideMimeType( s.mimeType );
+ }
+
+ // X-Requested-With header
+ // For cross-domain requests, seeing as conditions for a preflight are
+ // akin to a jigsaw puzzle, we simply never set it to be sure.
+ // (it can always be set on a per-request basis or even using ajaxSetup)
+ // For same-domain requests, won't change header if already provided.
+ if ( !s.crossDomain && !headers["X-Requested-With"] ) {
+ headers[ "X-Requested-With" ] = "XMLHttpRequest";
+ }
+
+ // Need an extra try/catch for cross domain requests in Firefox 3
+ try {
+ for ( i in headers ) {
+ xhr.setRequestHeader( i, headers[ i ] );
+ }
+ } catch( _ ) {}
+
+ // Do send the request
+ // This may raise an exception which is actually
+ // handled in jQuery.ajax (so no try/catch here)
+ xhr.send( ( s.hasContent && s.data ) || null );
+
+ // Listener
+ callback = function( _, isAbort ) {
+
+ var status,
+ statusText,
+ responseHeaders,
+ responses,
+ xml;
+
+ // Firefox throws exceptions when accessing properties
+ // of an xhr when a network error occurred
+ // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE)
+ try {
+
+ // Was never called and is aborted or complete
+ if ( callback && ( isAbort || xhr.readyState === 4 ) ) {
+
+ // Only called once
+ callback = undefined;
+
+ // Do not keep as active anymore
+ if ( handle ) {
+ xhr.onreadystatechange = jQuery.noop;
+ if ( xhrOnUnloadAbort ) {
+ delete xhrCallbacks[ handle ];
+ }
+ }
+
+ // If it's an abort
+ if ( isAbort ) {
+ // Abort it manually if needed
+ if ( xhr.readyState !== 4 ) {
+ xhr.abort();
+ }
+ } else {
+ status = xhr.status;
+ responseHeaders = xhr.getAllResponseHeaders();
+ responses = {};
+ xml = xhr.responseXML;
+
+ // Construct response list
+ if ( xml && xml.documentElement /* #4958 */ ) {
+ responses.xml = xml;
+ }
+
+ // When requesting binary data, IE6-9 will throw an exception
+ // on any attempt to access responseText (#11426)
+ try {
+ responses.text = xhr.responseText;
+ } catch( e ) {
+ }
+
+ // Firefox throws an exception when accessing
+ // statusText for faulty cross-domain requests
+ try {
+ statusText = xhr.statusText;
+ } catch( e ) {
+ // We normalize with Webkit giving an empty statusText
+ statusText = "";
+ }
+
+ // Filter status for non standard behaviors
+
+ // If the request is local and we have data: assume a success
+ // (success with no data won't get notified, that's the best we
+ // can do given current implementations)
+ if ( !status && s.isLocal && !s.crossDomain ) {
+ status = responses.text ? 200 : 404;
+ // IE - #1450: sometimes returns 1223 when it should be 204
+ } else if ( status === 1223 ) {
+ status = 204;
+ }
+ }
+ }
+ } catch( firefoxAccessException ) {
+ if ( !isAbort ) {
+ complete( -1, firefoxAccessException );
+ }
+ }
+
+ // Call complete if needed
+ if ( responses ) {
+ complete( status, statusText, responses, responseHeaders );
+ }
+ };
+
+ if ( !s.async ) {
+ // if we're in sync mode we fire the callback
+ callback();
+ } else if ( xhr.readyState === 4 ) {
+ // (IE6 & IE7) if it's in cache and has been
+ // retrieved directly we need to fire the callback
+ setTimeout( callback, 0 );
+ } else {
+ handle = ++xhrId;
+ if ( xhrOnUnloadAbort ) {
+ // Create the active xhrs callbacks list if needed
+ // and attach the unload handler
+ if ( !xhrCallbacks ) {
+ xhrCallbacks = {};
+ jQuery( window ).unload( xhrOnUnloadAbort );
+ }
+ // Add to list of active xhrs callbacks
+ xhrCallbacks[ handle ] = callback;
+ }
+ xhr.onreadystatechange = callback;
+ }
+ },
+
+ abort: function() {
+ if ( callback ) {
+ callback(0,1);
+ }
+ }
+ };
+ }
+ });
+}
+var fxNow, timerId,
+ rfxtypes = /^(?:toggle|show|hide)$/,
+ rfxnum = new RegExp( "^(?:([-+])=|)(" + core_pnum + ")([a-z%]*)$", "i" ),
+ rrun = /queueHooks$/,
+ animationPrefilters = [ defaultPrefilter ],
+ tweeners = {
+ "*": [function( prop, value ) {
+ var end, unit,
+ tween = this.createTween( prop, value ),
+ parts = rfxnum.exec( value ),
+ target = tween.cur(),
+ start = +target || 0,
+ scale = 1,
+ maxIterations = 20;
+
+ if ( parts ) {
+ end = +parts[2];
+ unit = parts[3] || ( jQuery.cssNumber[ prop ] ? "" : "px" );
+
+ // We need to compute starting value
+ if ( unit !== "px" && start ) {
+ // Iteratively approximate from a nonzero starting point
+ // Prefer the current property, because this process will be trivial if it uses the same units
+ // Fallback to end or a simple constant
+ start = jQuery.css( tween.elem, prop, true ) || end || 1;
+
+ do {
+ // If previous iteration zeroed out, double until we get *something*
+ // Use a string for doubling factor so we don't accidentally see scale as unchanged below
+ scale = scale || ".5";
+
+ // Adjust and apply
+ start = start / scale;
+ jQuery.style( tween.elem, prop, start + unit );
+
+ // Update scale, tolerating zero or NaN from tween.cur()
+ // And breaking the loop if scale is unchanged or perfect, or if we've just had enough
+ } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations );
+ }
+
+ tween.unit = unit;
+ tween.start = start;
+ // If a +=/-= token was provided, we're doing a relative animation
+ tween.end = parts[1] ? start + ( parts[1] + 1 ) * end : end;
+ }
+ return tween;
+ }]
+ };
+
+// Animations created synchronously will run synchronously
+function createFxNow() {
+ setTimeout(function() {
+ fxNow = undefined;
+ }, 0 );
+ return ( fxNow = jQuery.now() );
+}
+
+function createTweens( animation, props ) {
+ jQuery.each( props, function( prop, value ) {
+ var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ),
+ index = 0,
+ length = collection.length;
+ for ( ; index < length; index++ ) {
+ if ( collection[ index ].call( animation, prop, value ) ) {
+
+ // we're done with this property
+ return;
+ }
+ }
+ });
+}
+
+function Animation( elem, properties, options ) {
+ var result,
+ index = 0,
+ tweenerIndex = 0,
+ length = animationPrefilters.length,
+ deferred = jQuery.Deferred().always( function() {
+ // don't match elem in the :animated selector
+ delete tick.elem;
+ }),
+ tick = function() {
+ var currentTime = fxNow || createFxNow(),
+ remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
+ // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497)
+ temp = remaining / animation.duration || 0,
+ percent = 1 - temp,
+ index = 0,
+ length = animation.tweens.length;
+
+ for ( ; index < length ; index++ ) {
+ animation.tweens[ index ].run( percent );
+ }
+
+ deferred.notifyWith( elem, [ animation, percent, remaining ]);
+
+ if ( percent < 1 && length ) {
+ return remaining;
+ } else {
+ deferred.resolveWith( elem, [ animation ] );
+ return false;
+ }
+ },
+ animation = deferred.promise({
+ elem: elem,
+ props: jQuery.extend( {}, properties ),
+ opts: jQuery.extend( true, { specialEasing: {} }, options ),
+ originalProperties: properties,
+ originalOptions: options,
+ startTime: fxNow || createFxNow(),
+ duration: options.duration,
+ tweens: [],
+ createTween: function( prop, end, easing ) {
+ var tween = jQuery.Tween( elem, animation.opts, prop, end,
+ animation.opts.specialEasing[ prop ] || animation.opts.easing );
+ animation.tweens.push( tween );
+ return tween;
+ },
+ stop: function( gotoEnd ) {
+ var index = 0,
+ // if we are going to the end, we want to run all the tweens
+ // otherwise we skip this part
+ length = gotoEnd ? animation.tweens.length : 0;
+
+ for ( ; index < length ; index++ ) {
+ animation.tweens[ index ].run( 1 );
+ }
+
+ // resolve when we played the last frame
+ // otherwise, reject
+ if ( gotoEnd ) {
+ deferred.resolveWith( elem, [ animation, gotoEnd ] );
+ } else {
+ deferred.rejectWith( elem, [ animation, gotoEnd ] );
+ }
+ return this;
+ }
+ }),
+ props = animation.props;
+
+ propFilter( props, animation.opts.specialEasing );
+
+ for ( ; index < length ; index++ ) {
+ result = animationPrefilters[ index ].call( animation, elem, props, animation.opts );
+ if ( result ) {
+ return result;
+ }
+ }
+
+ createTweens( animation, props );
+
+ if ( jQuery.isFunction( animation.opts.start ) ) {
+ animation.opts.start.call( elem, animation );
+ }
+
+ jQuery.fx.timer(
+ jQuery.extend( tick, {
+ anim: animation,
+ queue: animation.opts.queue,
+ elem: elem
+ })
+ );
+
+ // attach callbacks from options
+ return animation.progress( animation.opts.progress )
+ .done( animation.opts.done, animation.opts.complete )
+ .fail( animation.opts.fail )
+ .always( animation.opts.always );
+}
+
+function propFilter( props, specialEasing ) {
+ var index, name, easing, value, hooks;
+
+ // camelCase, specialEasing and expand cssHook pass
+ for ( index in props ) {
+ name = jQuery.camelCase( index );
+ easing = specialEasing[ name ];
+ value = props[ index ];
+ if ( jQuery.isArray( value ) ) {
+ easing = value[ 1 ];
+ value = props[ index ] = value[ 0 ];
+ }
+
+ if ( index !== name ) {
+ props[ name ] = value;
+ delete props[ index ];
+ }
+
+ hooks = jQuery.cssHooks[ name ];
+ if ( hooks && "expand" in hooks ) {
+ value = hooks.expand( value );
+ delete props[ name ];
+
+ // not quite $.extend, this wont overwrite keys already present.
+ // also - reusing 'index' from above because we have the correct "name"
+ for ( index in value ) {
+ if ( !( index in props ) ) {
+ props[ index ] = value[ index ];
+ specialEasing[ index ] = easing;
+ }
+ }
+ } else {
+ specialEasing[ name ] = easing;
+ }
+ }
+}
+
+jQuery.Animation = jQuery.extend( Animation, {
+
+ tweener: function( props, callback ) {
+ if ( jQuery.isFunction( props ) ) {
+ callback = props;
+ props = [ "*" ];
+ } else {
+ props = props.split(" ");
+ }
+
+ var prop,
+ index = 0,
+ length = props.length;
+
+ for ( ; index < length ; index++ ) {
+ prop = props[ index ];
+ tweeners[ prop ] = tweeners[ prop ] || [];
+ tweeners[ prop ].unshift( callback );
+ }
+ },
+
+ prefilter: function( callback, prepend ) {
+ if ( prepend ) {
+ animationPrefilters.unshift( callback );
+ } else {
+ animationPrefilters.push( callback );
+ }
+ }
+});
+
+function defaultPrefilter( elem, props, opts ) {
+ var index, prop, value, length, dataShow, toggle, tween, hooks, oldfire,
+ anim = this,
+ style = elem.style,
+ orig = {},
+ handled = [],
+ hidden = elem.nodeType && isHidden( elem );
+
+ // handle queue: false promises
+ if ( !opts.queue ) {
+ hooks = jQuery._queueHooks( elem, "fx" );
+ if ( hooks.unqueued == null ) {
+ hooks.unqueued = 0;
+ oldfire = hooks.empty.fire;
+ hooks.empty.fire = function() {
+ if ( !hooks.unqueued ) {
+ oldfire();
+ }
+ };
+ }
+ hooks.unqueued++;
+
+ anim.always(function() {
+ // doing this makes sure that the complete handler will be called
+ // before this completes
+ anim.always(function() {
+ hooks.unqueued--;
+ if ( !jQuery.queue( elem, "fx" ).length ) {
+ hooks.empty.fire();
+ }
+ });
+ });
+ }
+
+ // height/width overflow pass
+ if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) {
+ // Make sure that nothing sneaks out
+ // Record all 3 overflow attributes because IE does not
+ // change the overflow attribute when overflowX and
+ // overflowY are set to the same value
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
+
+ // Set display property to inline-block for height/width
+ // animations on inline elements that are having width/height animated
+ if ( jQuery.css( elem, "display" ) === "inline" &&
+ jQuery.css( elem, "float" ) === "none" ) {
+
+ // inline-level elements accept inline-block;
+ // block-level elements need to be inline with layout
+ if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) {
+ style.display = "inline-block";
+
+ } else {
+ style.zoom = 1;
+ }
+ }
+ }
+
+ if ( opts.overflow ) {
+ style.overflow = "hidden";
+ if ( !jQuery.support.shrinkWrapBlocks ) {
+ anim.done(function() {
+ style.overflow = opts.overflow[ 0 ];
+ style.overflowX = opts.overflow[ 1 ];
+ style.overflowY = opts.overflow[ 2 ];
+ });
+ }
+ }
+
+
+ // show/hide pass
+ for ( index in props ) {
+ value = props[ index ];
+ if ( rfxtypes.exec( value ) ) {
+ delete props[ index ];
+ toggle = toggle || value === "toggle";
+ if ( value === ( hidden ? "hide" : "show" ) ) {
+ continue;
+ }
+ handled.push( index );
+ }
+ }
+
+ length = handled.length;
+ if ( length ) {
+ dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} );
+ if ( "hidden" in dataShow ) {
+ hidden = dataShow.hidden;
+ }
+
+ // store state if its toggle - enables .stop().toggle() to "reverse"
+ if ( toggle ) {
+ dataShow.hidden = !hidden;
+ }
+ if ( hidden ) {
+ jQuery( elem ).show();
+ } else {
+ anim.done(function() {
+ jQuery( elem ).hide();
+ });
+ }
+ anim.done(function() {
+ var prop;
+ jQuery.removeData( elem, "fxshow", true );
+ for ( prop in orig ) {
+ jQuery.style( elem, prop, orig[ prop ] );
+ }
+ });
+ for ( index = 0 ; index < length ; index++ ) {
+ prop = handled[ index ];
+ tween = anim.createTween( prop, hidden ? dataShow[ prop ] : 0 );
+ orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop );
+
+ if ( !( prop in dataShow ) ) {
+ dataShow[ prop ] = tween.start;
+ if ( hidden ) {
+ tween.end = tween.start;
+ tween.start = prop === "width" || prop === "height" ? 1 : 0;
+ }
+ }
+ }
+ }
+}
+
+function Tween( elem, options, prop, end, easing ) {
+ return new Tween.prototype.init( elem, options, prop, end, easing );
+}
+jQuery.Tween = Tween;
+
+Tween.prototype = {
+ constructor: Tween,
+ init: function( elem, options, prop, end, easing, unit ) {
+ this.elem = elem;
+ this.prop = prop;
+ this.easing = easing || "swing";
+ this.options = options;
+ this.start = this.now = this.cur();
+ this.end = end;
+ this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
+ },
+ cur: function() {
+ var hooks = Tween.propHooks[ this.prop ];
+
+ return hooks && hooks.get ?
+ hooks.get( this ) :
+ Tween.propHooks._default.get( this );
+ },
+ run: function( percent ) {
+ var eased,
+ hooks = Tween.propHooks[ this.prop ];
+
+ if ( this.options.duration ) {
+ this.pos = eased = jQuery.easing[ this.easing ](
+ percent, this.options.duration * percent, 0, 1, this.options.duration
+ );
+ } else {
+ this.pos = eased = percent;
+ }
+ this.now = ( this.end - this.start ) * eased + this.start;
+
+ if ( this.options.step ) {
+ this.options.step.call( this.elem, this.now, this );
+ }
+
+ if ( hooks && hooks.set ) {
+ hooks.set( this );
+ } else {
+ Tween.propHooks._default.set( this );
+ }
+ return this;
+ }
+};
+
+Tween.prototype.init.prototype = Tween.prototype;
+
+Tween.propHooks = {
+ _default: {
+ get: function( tween ) {
+ var result;
+
+ if ( tween.elem[ tween.prop ] != null &&
+ (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) {
+ return tween.elem[ tween.prop ];
+ }
+
+ // passing any value as a 4th parameter to .css will automatically
+ // attempt a parseFloat and fallback to a string if the parse fails
+ // so, simple values such as "10px" are parsed to Float.
+ // complex values such as "rotate(1rad)" are returned as is.
+ result = jQuery.css( tween.elem, tween.prop, false, "" );
+ // Empty strings, null, undefined and "auto" are converted to 0.
+ return !result || result === "auto" ? 0 : result;
+ },
+ set: function( tween ) {
+ // use step hook for back compat - use cssHook if its there - use .style if its
+ // available and use plain properties where available
+ if ( jQuery.fx.step[ tween.prop ] ) {
+ jQuery.fx.step[ tween.prop ]( tween );
+ } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) {
+ jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
+ } else {
+ tween.elem[ tween.prop ] = tween.now;
+ }
+ }
+ }
+};
+
+// Remove in 2.0 - this supports IE8's panic based approach
+// to setting things on disconnected nodes
+
+Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
+ set: function( tween ) {
+ if ( tween.elem.nodeType && tween.elem.parentNode ) {
+ tween.elem[ tween.prop ] = tween.now;
+ }
+ }
+};
+
+jQuery.each([ "toggle", "show", "hide" ], function( i, name ) {
+ var cssFn = jQuery.fn[ name ];
+ jQuery.fn[ name ] = function( speed, easing, callback ) {
+ return speed == null || typeof speed === "boolean" ||
+ // special check for .toggle( handler, handler, ... )
+ ( !i && jQuery.isFunction( speed ) && jQuery.isFunction( easing ) ) ?
+ cssFn.apply( this, arguments ) :
+ this.animate( genFx( name, true ), speed, easing, callback );
+ };
+});
+
+jQuery.fn.extend({
+ fadeTo: function( speed, to, easing, callback ) {
+
+ // show any hidden elements after setting opacity to 0
+ return this.filter( isHidden ).css( "opacity", 0 ).show()
+
+ // animate to the value specified
+ .end().animate({ opacity: to }, speed, easing, callback );
+ },
+ animate: function( prop, speed, easing, callback ) {
+ var empty = jQuery.isEmptyObject( prop ),
+ optall = jQuery.speed( speed, easing, callback ),
+ doAnimation = function() {
+ // Operate on a copy of prop so per-property easing won't be lost
+ var anim = Animation( this, jQuery.extend( {}, prop ), optall );
+
+ // Empty animations resolve immediately
+ if ( empty ) {
+ anim.stop( true );
+ }
+ };
+
+ return empty || optall.queue === false ?
+ this.each( doAnimation ) :
+ this.queue( optall.queue, doAnimation );
+ },
+ stop: function( type, clearQueue, gotoEnd ) {
+ var stopQueue = function( hooks ) {
+ var stop = hooks.stop;
+ delete hooks.stop;
+ stop( gotoEnd );
+ };
+
+ if ( typeof type !== "string" ) {
+ gotoEnd = clearQueue;
+ clearQueue = type;
+ type = undefined;
+ }
+ if ( clearQueue && type !== false ) {
+ this.queue( type || "fx", [] );
+ }
+
+ return this.each(function() {
+ var dequeue = true,
+ index = type != null && type + "queueHooks",
+ timers = jQuery.timers,
+ data = jQuery._data( this );
+
+ if ( index ) {
+ if ( data[ index ] && data[ index ].stop ) {
+ stopQueue( data[ index ] );
+ }
+ } else {
+ for ( index in data ) {
+ if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
+ stopQueue( data[ index ] );
+ }
+ }
+ }
+
+ for ( index = timers.length; index--; ) {
+ if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) {
+ timers[ index ].anim.stop( gotoEnd );
+ dequeue = false;
+ timers.splice( index, 1 );
+ }
+ }
+
+ // start the next in the queue if the last step wasn't forced
+ // timers currently will call their complete callbacks, which will dequeue
+ // but only if they were gotoEnd
+ if ( dequeue || !gotoEnd ) {
+ jQuery.dequeue( this, type );
+ }
+ });
+ }
+});
+
+// Generate parameters to create a standard animation
+function genFx( type, includeWidth ) {
+ var which,
+ attrs = { height: type },
+ i = 0;
+
+ // if we include width, step value is 1 to do all cssExpand values,
+ // if we don't include width, step value is 2 to skip over Left and Right
+ includeWidth = includeWidth? 1 : 0;
+ for( ; i < 4 ; i += 2 - includeWidth ) {
+ which = cssExpand[ i ];
+ attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
+ }
+
+ if ( includeWidth ) {
+ attrs.opacity = attrs.width = type;
+ }
+
+ return attrs;
+}
+
+// Generate shortcuts for custom animations
+jQuery.each({
+ slideDown: genFx("show"),
+ slideUp: genFx("hide"),
+ slideToggle: genFx("toggle"),
+ fadeIn: { opacity: "show" },
+ fadeOut: { opacity: "hide" },
+ fadeToggle: { opacity: "toggle" }
+}, function( name, props ) {
+ jQuery.fn[ name ] = function( speed, easing, callback ) {
+ return this.animate( props, speed, easing, callback );
+ };
+});
+
+jQuery.speed = function( speed, easing, fn ) {
+ var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
+ complete: fn || !fn && easing ||
+ jQuery.isFunction( speed ) && speed,
+ duration: speed,
+ easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing
+ };
+
+ opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration :
+ opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default;
+
+ // normalize opt.queue - true/undefined/null -> "fx"
+ if ( opt.queue == null || opt.queue === true ) {
+ opt.queue = "fx";
+ }
+
+ // Queueing
+ opt.old = opt.complete;
+
+ opt.complete = function() {
+ if ( jQuery.isFunction( opt.old ) ) {
+ opt.old.call( this );
+ }
+
+ if ( opt.queue ) {
+ jQuery.dequeue( this, opt.queue );
+ }
+ };
+
+ return opt;
+};
+
+jQuery.easing = {
+ linear: function( p ) {
+ return p;
+ },
+ swing: function( p ) {
+ return 0.5 - Math.cos( p*Math.PI ) / 2;
+ }
+};
+
+jQuery.timers = [];
+jQuery.fx = Tween.prototype.init;
+jQuery.fx.tick = function() {
+ var timer,
+ timers = jQuery.timers,
+ i = 0;
+
+ fxNow = jQuery.now();
+
+ for ( ; i < timers.length; i++ ) {
+ timer = timers[ i ];
+ // Checks the timer has not already been removed
+ if ( !timer() && timers[ i ] === timer ) {
+ timers.splice( i--, 1 );
+ }
+ }
+
+ if ( !timers.length ) {
+ jQuery.fx.stop();
+ }
+ fxNow = undefined;
+};
+
+jQuery.fx.timer = function( timer ) {
+ if ( timer() && jQuery.timers.push( timer ) && !timerId ) {
+ timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval );
+ }
+};
+
+jQuery.fx.interval = 13;
+
+jQuery.fx.stop = function() {
+ clearInterval( timerId );
+ timerId = null;
+};
+
+jQuery.fx.speeds = {
+ slow: 600,
+ fast: 200,
+ // Default speed
+ _default: 400
+};
+
+// Back Compat <1.8 extension point
+jQuery.fx.step = {};
+
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.animated = function( elem ) {
+ return jQuery.grep(jQuery.timers, function( fn ) {
+ return elem === fn.elem;
+ }).length;
+ };
+}
+var rroot = /^(?:body|html)$/i;
+
+jQuery.fn.offset = function( options ) {
+ if ( arguments.length ) {
+ return options === undefined ?
+ this :
+ this.each(function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ });
+ }
+
+ var docElem, body, win, clientTop, clientLeft, scrollTop, scrollLeft,
+ box = { top: 0, left: 0 },
+ elem = this[ 0 ],
+ doc = elem && elem.ownerDocument;
+
+ if ( !doc ) {
+ return;
+ }
+
+ if ( (body = doc.body) === elem ) {
+ return jQuery.offset.bodyOffset( elem );
+ }
+
+ docElem = doc.documentElement;
+
+ // Make sure it's not a disconnected DOM node
+ if ( !jQuery.contains( docElem, elem ) ) {
+ return box;
+ }
+
+ // If we don't have gBCR, just use 0,0 rather than error
+ // BlackBerry 5, iOS 3 (original iPhone)
+ if ( typeof elem.getBoundingClientRect !== "undefined" ) {
+ box = elem.getBoundingClientRect();
+ }
+ win = getWindow( doc );
+ clientTop = docElem.clientTop || body.clientTop || 0;
+ clientLeft = docElem.clientLeft || body.clientLeft || 0;
+ scrollTop = win.pageYOffset || docElem.scrollTop;
+ scrollLeft = win.pageXOffset || docElem.scrollLeft;
+ return {
+ top: box.top + scrollTop - clientTop,
+ left: box.left + scrollLeft - clientLeft
+ };
+};
+
+jQuery.offset = {
+
+ bodyOffset: function( body ) {
+ var top = body.offsetTop,
+ left = body.offsetLeft;
+
+ if ( jQuery.support.doesNotIncludeMarginInBodyOffset ) {
+ top += parseFloat( jQuery.css(body, "marginTop") ) || 0;
+ left += parseFloat( jQuery.css(body, "marginLeft") ) || 0;
+ }
+
+ return { top: top, left: left };
+ },
+
+ setOffset: function( elem, options, i ) {
+ var position = jQuery.css( elem, "position" );
+
+ // set position first, in-case top/left are set even on static elem
+ if ( position === "static" ) {
+ elem.style.position = "relative";
+ }
+
+ var curElem = jQuery( elem ),
+ curOffset = curElem.offset(),
+ curCSSTop = jQuery.css( elem, "top" ),
+ curCSSLeft = jQuery.css( elem, "left" ),
+ calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1,
+ props = {}, curPosition = {}, curTop, curLeft;
+
+ // need to be able to calculate position if either top or left is auto and position is either absolute or fixed
+ if ( calculatePosition ) {
+ curPosition = curElem.position();
+ curTop = curPosition.top;
+ curLeft = curPosition.left;
+ } else {
+ curTop = parseFloat( curCSSTop ) || 0;
+ curLeft = parseFloat( curCSSLeft ) || 0;
+ }
+
+ if ( jQuery.isFunction( options ) ) {
+ options = options.call( elem, i, curOffset );
+ }
+
+ if ( options.top != null ) {
+ props.top = ( options.top - curOffset.top ) + curTop;
+ }
+ if ( options.left != null ) {
+ props.left = ( options.left - curOffset.left ) + curLeft;
+ }
+
+ if ( "using" in options ) {
+ options.using.call( elem, props );
+ } else {
+ curElem.css( props );
+ }
+ }
+};
+
+
+jQuery.fn.extend({
+
+ position: function() {
+ if ( !this[0] ) {
+ return;
+ }
+
+ var elem = this[0],
+
+ // Get *real* offsetParent
+ offsetParent = this.offsetParent(),
+
+ // Get correct offsets
+ offset = this.offset(),
+ parentOffset = rroot.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset();
+
+ // Subtract element margins
+ // note: when an element has margin: auto the offsetLeft and marginLeft
+ // are the same in Safari causing offset.left to incorrectly be 0
+ offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0;
+ offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0;
+
+ // Add offsetParent borders
+ parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0;
+ parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0;
+
+ // Subtract the two offsets
+ return {
+ top: offset.top - parentOffset.top,
+ left: offset.left - parentOffset.left
+ };
+ },
+
+ offsetParent: function() {
+ return this.map(function() {
+ var offsetParent = this.offsetParent || document.body;
+ while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) {
+ offsetParent = offsetParent.offsetParent;
+ }
+ return offsetParent || document.body;
+ });
+ }
+});
+
+
+// Create scrollLeft and scrollTop methods
+jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) {
+ var top = /Y/.test( prop );
+
+ jQuery.fn[ method ] = function( val ) {
+ return jQuery.access( this, function( elem, method, val ) {
+ var win = getWindow( elem );
+
+ if ( val === undefined ) {
+ return win ? (prop in win) ? win[ prop ] :
+ win.document.documentElement[ method ] :
+ elem[ method ];
+ }
+
+ if ( win ) {
+ win.scrollTo(
+ !top ? val : jQuery( win ).scrollLeft(),
+ top ? val : jQuery( win ).scrollTop()
+ );
+
+ } else {
+ elem[ method ] = val;
+ }
+ }, method, val, arguments.length, null );
+ };
+});
+
+function getWindow( elem ) {
+ return jQuery.isWindow( elem ) ?
+ elem :
+ elem.nodeType === 9 ?
+ elem.defaultView || elem.parentWindow :
+ false;
+}
+// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods
+jQuery.each( { Height: "height", Width: "width" }, function( name, type ) {
+ jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) {
+ // margin is only for outerHeight, outerWidth
+ jQuery.fn[ funcName ] = function( margin, value ) {
+ var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ),
+ extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" );
+
+ return jQuery.access( this, function( elem, type, value ) {
+ var doc;
+
+ if ( jQuery.isWindow( elem ) ) {
+ // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there
+ // isn't a whole lot we can do. See pull request at this URL for discussion:
+ // https://github.com/jquery/jquery/pull/764
+ return elem.document.documentElement[ "client" + name ];
+ }
+
+ // Get document width or height
+ if ( elem.nodeType === 9 ) {
+ doc = elem.documentElement;
+
+ // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest
+ // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it.
+ return Math.max(
+ elem.body[ "scroll" + name ], doc[ "scroll" + name ],
+ elem.body[ "offset" + name ], doc[ "offset" + name ],
+ doc[ "client" + name ]
+ );
+ }
+
+ return value === undefined ?
+ // Get width or height on the element, requesting but not forcing parseFloat
+ jQuery.css( elem, type, value, extra ) :
+
+ // Set width or height on the element
+ jQuery.style( elem, type, value, extra );
+ }, type, chainable ? margin : undefined, chainable, null );
+ };
+ });
+});
+// Expose jQuery to the global object
+window.jQuery = window.$ = jQuery;
+
+// Expose jQuery as an AMD module, but only for AMD loaders that
+// understand the issues with loading multiple versions of jQuery
+// in a page that all might call define(). The loader will indicate
+// they have special allowances for multiple jQuery versions by
+// specifying define.amd.jQuery = true. Register as a named module,
+// since jQuery can be concatenated with other files that may use define,
+// but not use a proper concatenation script that understands anonymous
+// AMD modules. A named AMD is safest and most robust way to register.
+// Lowercase jquery is used because AMD module names are derived from
+// file names, and jQuery is normally delivered in a lowercase file name.
+// Do this after creating the global so that if an AMD module wants to call
+// noConflict to hide this version of jQuery, it will work.
+if ( typeof define === "function" && define.amd && define.amd.jQuery ) {
+ define( "jquery", [], function () { return jQuery; } );
+}
+
+})( window );
diff --git a/qa/workunits/erasure-code/plot.js b/qa/workunits/erasure-code/plot.js
new file mode 100644
index 000000000..bd2bba5bb
--- /dev/null
+++ b/qa/workunits/erasure-code/plot.js
@@ -0,0 +1,82 @@
+$(function() {
+ encode = [];
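+ // The encode_*/decode_* series variables are expected to be defined by the
+ // benchmark output (e.g. from bench.sh) loaded before this script; only the
+ // series that are actually present get plotted.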
+ if (typeof encode_vandermonde_isa != 'undefined') {
+ encode.push({
+ data: encode_vandermonde_isa,
+ label: "ISA, Vandermonde",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof encode_vandermonde_jerasure != 'undefined') {
+ encode.push({
+ data: encode_vandermonde_jerasure,
+ label: "Jerasure Generic, Vandermonde",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof encode_cauchy_isa != 'undefined') {
+ encode.push({
+ data: encode_cauchy_isa,
+ label: "ISA, Cauchy",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof encode_cauchy_jerasure != 'undefined') {
+ encode.push({
+ data: encode_cauchy_jerasure,
+ label: "Jerasure, Cauchy",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ $.plot("#encode", encode, {
+ xaxis: {
+ mode: "categories",
+ tickLength: 0
+ },
+ });
+
+ decode = [];
+ if (typeof decode_vandermonde_isa != 'undefined') {
+ decode.push({
+ data: decode_vandermonde_isa,
+ label: "ISA, Vandermonde",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof decode_vandermonde_jerasure != 'undefined') {
+ decode.push({
+ data: decode_vandermonde_jerasure,
+ label: "Jerasure Generic, Vandermonde",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof decode_cauchy_isa != 'undefined') {
+ decode.push({
+ data: decode_cauchy_isa,
+ label: "ISA, Cauchy",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ if (typeof decode_cauchy_jerasure != 'undefined') {
+ decode.push({
+ data: decode_cauchy_jerasure,
+ label: "Jerasure, Cauchy",
+ points: { show: true },
+ lines: { show: true },
+ });
+ }
+ $.plot("#decode", decode, {
+ xaxis: {
+ mode: "categories",
+ tickLength: 0
+ },
+ });
+
+});
diff --git a/qa/workunits/false.sh b/qa/workunits/false.sh
new file mode 100644
index 000000000..8a961b329
--- /dev/null
+++ b/qa/workunits/false.sh
@@ -0,0 +1,3 @@
+#!/bin/sh -ex
+
+false \ No newline at end of file
diff --git a/qa/workunits/fs/.gitignore b/qa/workunits/fs/.gitignore
new file mode 100644
index 000000000..f7f7a0614
--- /dev/null
+++ b/qa/workunits/fs/.gitignore
@@ -0,0 +1 @@
+test_o_trunc
diff --git a/qa/workunits/fs/Makefile b/qa/workunits/fs/Makefile
new file mode 100644
index 000000000..c9934254d
--- /dev/null
+++ b/qa/workunits/fs/Makefile
@@ -0,0 +1,11 @@
+CFLAGS = -Wall -Wextra -D_GNU_SOURCE
+
+TARGETS = test_o_trunc
+
+.c:
+ $(CC) $(CFLAGS) $@.c -o $@
+
+all: $(TARGETS)
+
+clean:
+ rm $(TARGETS)
diff --git a/qa/workunits/fs/cephfs_mirror_ha_gen.sh b/qa/workunits/fs/cephfs_mirror_ha_gen.sh
new file mode 100755
index 000000000..35ee9d4c7
--- /dev/null
+++ b/qa/workunits/fs/cephfs_mirror_ha_gen.sh
@@ -0,0 +1,69 @@
+#!/bin/bash -ex
+#
+# cephfs_mirror_ha_gen.sh - generate workload to synchronize
+#
+
+. $(dirname $0)/cephfs_mirror_helpers.sh
+
+cleanup()
+{
+ for i in `seq 1 $NR_DIRECTORIES`
+ do
+ local repo_name="${REPO_PATH_PFX}_$i"
+ for j in `seq 1 $NR_SNAPSHOTS`
+ do
+ snap_name=$repo_name/.snap/snap_$j
+ if test -d $snap_name; then
+ rmdir $snap_name
+ fi
+ done
+ done
+ exit 1
+}
+trap cleanup EXIT
+
+configure_peer()
+{
+ ceph mgr module enable mirroring
+ ceph fs snapshot mirror enable $PRIMARY_FS
+ ceph fs snapshot mirror peer_add $PRIMARY_FS client.mirror_remote@ceph $BACKUP_FS
+
+ for i in `seq 1 $NR_DIRECTORIES`
+ do
+ local repo_name="${REPO_PATH_PFX}_$i"
+ ceph fs snapshot mirror add $PRIMARY_FS "$MIRROR_SUBDIR/$repo_name"
+ done
+}
+
+create_snaps()
+{
+ for i in `seq 1 $NR_DIRECTORIES`
+ do
+ local repo_name="${REPO_PATH_PFX}_$i"
+ for j in `seq 1 $NR_SNAPSHOTS`
+ do
+ snap_name=$repo_name/.snap/snap_$j
+ r=$(( $RANDOM % 100 + 5 ))
+ arr=($repo_name "reset" "--hard" "HEAD~$r")
+ exec_git_cmd "${arr[@]}"
+ mkdir $snap_name
+ store_checksum $snap_name
+ done
+ done
+}
+
+unset CEPH_CLI_TEST_DUP_COMMAND
+
+echo "running generator on prmary file system..."
+
+# setup git repos to be used as data set
+setup_repos
+
+# turn on mirroring, add peers...
+configure_peer
+
+# snapshots on primary
+create_snaps
+
+# do not cleanup when exiting on success..
+trap - EXIT
diff --git a/qa/workunits/fs/cephfs_mirror_ha_verify.sh b/qa/workunits/fs/cephfs_mirror_ha_verify.sh
new file mode 100755
index 000000000..8d8b3859c
--- /dev/null
+++ b/qa/workunits/fs/cephfs_mirror_ha_verify.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -ex
+#
+# cephfs_mirror_ha_verify.sh - verify synchronized snapshots
+#
+
+. $(dirname $0)/cephfs_mirror_helpers.sh
+
+echo "running verifier on secondary file system..."
+
+for i in `seq 1 $NR_DIRECTORIES`
+do
+ repo_name="${REPO_PATH_PFX}_$i"
+ for j in `seq 1 $NR_SNAPSHOTS`
+ do
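+ # poll with increasing sleeps, giving the mirror daemon time to sync this snapshot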
+ for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64 64 128 128
+ do
+ sleep $s
+ snap_name=$repo_name/.snap/snap_$j
+ if test -d $repo_name; then
+ echo "checking snapshot [$snap_name] in $repo_name"
+ if test -d $snap_name; then
+ echo "generating hash for $snap_name"
+ cksum=''
+ calc_checksum $snap_name cksum
+ ret=$(compare_checksum $cksum $snap_name)
+ if [ $ret -ne 0 ]; then
+ echo "checksum failed $snap_name ($cksum)"
+ exit $ret
+ else
+ echo "checksum matched $snap_name ($cksum)"
+ continue 2
+ fi
+ fi
+ fi
+ done
+ echo "couldn't complete verification for: $snap_name"
+ done
+done
+
+echo "verify done!"
diff --git a/qa/workunits/fs/cephfs_mirror_helpers.sh b/qa/workunits/fs/cephfs_mirror_helpers.sh
new file mode 100644
index 000000000..69f1c6f3d
--- /dev/null
+++ b/qa/workunits/fs/cephfs_mirror_helpers.sh
@@ -0,0 +1,66 @@
+PRIMARY_FS='dc'
+BACKUP_FS='dc-backup'
+
+REPO=ceph-qa-suite
+REPO_DIR=ceph_repo
+REPO_PATH_PFX="$REPO_DIR/$REPO"
+
+NR_DIRECTORIES=4
+NR_SNAPSHOTS=4
+MIRROR_SUBDIR='/mirror'
+
+calc_checksum()
+{
+ local path=$1
+ local -n ref=$2
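+ # nameref: the result is written into the caller's variable named by $2;
+ # the checksum is the md5 of the concatenated per-file md5sums under $path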
+ ref=`find -L $path -type f -exec md5sum {} + | awk '{ print $1 }' | md5sum | awk '{ print $1 }'`
+}
+
+store_checksum()
+{
+ local path=$1
+ local cksum='' #something invalid
+ local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
+ calc_checksum $path cksum
+ echo -n $cksum > "/tmp/primary-$fhash"
+}
+
+compare_checksum()
+{
+ local ret=0
+ local cksum=$1
+ local path=$2
+ local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
+ local cksum_ondisk=`cat /tmp/primary-$fhash`
+ if [ $cksum != $cksum_ondisk ]; then
+ echo "$cksum <> $cksum_ondisk"
+ ret=1
+ fi
+ echo $ret
+}
+
+exec_git_cmd()
+{
+ local arg=("$@")
+ local repo_name=${arg[0]}
+ local cmd=${arg[@]:1}
+ git --git-dir "$repo_name/.git" $cmd
+}
+
+clone_repo()
+{
+ local repo_name=$1
+ git clone --branch giant "http://github.com/ceph/$REPO" $repo_name
+}
+
+setup_repos()
+{
+ mkdir "$REPO_DIR"
+
+ for i in `seq 1 $NR_DIRECTORIES`
+ do
+ local repo_name="${REPO_PATH_PFX}_$i"
+ mkdir $repo_name
+ clone_repo $repo_name
+ done
+}
diff --git a/qa/workunits/fs/damage/test-first-damage.sh b/qa/workunits/fs/damage/test-first-damage.sh
new file mode 100755
index 000000000..57447b957
--- /dev/null
+++ b/qa/workunits/fs/damage/test-first-damage.sh
@@ -0,0 +1,194 @@
+#!/bin/bash
+
+set -ex
+
+FIRST_DAMAGE="first-damage.py"
+FS=cephfs
+METADATA_POOL=cephfs_meta
+MOUNT=~/mnt/mnt.0
+PYTHON=python3
+
+function usage {
+ printf '%s: [--fs=<fs_name>] [--metadata-pool=<pool>] [--first-damage=</path/to/first-damage.py>] [--mount=<path>] [--python=<python>]\n' "$0"
+ exit 1
+}
+
+
+function create {
+ ceph config set mds mds_bal_fragment_dirs 0
+ mkdir dir
+ DIR_INODE=$(stat -c '%i' dir)
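+ # remember the inode so damage() can locate the directory's omap object later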
+ touch dir/a
+ touch dir/"a space"
+ touch -- $(printf 'dir/\xff')
+ mkdir dir/.snap/1
+ mkdir dir/.snap/2
+ # two snaps
+ rm dir/a
+ mkdir dir/.snap/3
+ # not present in HEAD
+ touch dir/a
+ mkdir dir/.snap/4
+ # one snap
+ rm dir/a
+ touch dir/a
+ mkdir dir/.snap/5
+ # unlink then create
+ rm dir/a
+ touch dir/a
+ # unlink then create, HEAD not snapped
+ ls dir/.snap/*/
+ mkdir big
+ BIG_DIR_INODE=$(stat -c '%i' big)
+ for i in `seq 1 15000`; do
+ touch $(printf 'big/%08d' $i)
+ done
+}
+
+function flush {
+ ceph tell mds."$FS":0 flush journal
+}
+
+function damage {
+ local IS=$(printf '%llx.%08llx' "$DIR_INODE" 0)
+ local LS=$(ceph tell mds."$FS":0 dump snaps | jq .last_created)
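+ # dirfrag omap keys are "<name>_<snapid hex>" or "<name>_head"; five snaps
+ # were created, so snap_1..snap_5 correspond to snapids LS-4..LS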
+
+ local T=$(mktemp -p /tmp)
+
+ # nuke snap 1 version of "a"
+ rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-4))) "$T"
+ printf '\xff\xff\xff\xf0' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
+ rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-4))) --input-file="$T"
+
+ # nuke snap 4 version of "a"
+ rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-1))) "$T"
+ printf '\xff\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
+ rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-1))) --input-file="$T"
+
+ # screw up HEAD
+ rados --pool="$METADATA_POOL" getomapval "$IS" a_head "$T"
+ printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
+ rados --pool="$METADATA_POOL" setomapval "$IS" a_head --input-file="$T"
+
+ # screw up HEAD on one dentry in big
+ IS=$(printf '%llx.%08llx' "$BIG_DIR_INODE" 0)
+ rados --pool="$METADATA_POOL" getomapval "$IS" 00009999_head "$T"
+ printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
+ rados --pool="$METADATA_POOL" setomapval "$IS" 00009999_head --input-file="$T"
+
+ rm -f "$T"
+}
+
+function recover {
+ flush
+ ceph fs fail "$FS"
+ sleep 5
+ cephfs-journal-tool --rank="$FS":0 event recover_dentries summary
+ cephfs-journal-tool --rank="$FS":0 journal reset
+ "$PYTHON" $FIRST_DAMAGE --debug /tmp/debug1 --memo /tmp/memo1 "$METADATA_POOL"
+ "$PYTHON" $FIRST_DAMAGE --debug /tmp/debug2 --memo /tmp/memo2 --repair-nosnap "$METADATA_POOL"
+ "$PYTHON" $FIRST_DAMAGE --debug /tmp/debug3 --memo /tmp/memo3 --remove "$METADATA_POOL"
+ ceph fs set "$FS" joinable true
+}
+
+function check {
+ stat dir || exit 1
+ stat dir/a || exit 1
+ for i in `seq 1 5`; do
+ stat dir/.snap/$i || exit 2
+ done
+ stat dir/.snap/2/a || exit 3
+ stat dir/.snap/5/a || exit 4
+ if stat dir/.snap/1/a; then
+ echo should be gone
+ exit 5
+ fi
+ if stat dir/.snap/3/a; then
+ echo should not ever exist
+ exit 6
+ fi
+ if stat dir/.snap/4/a; then
+ echo should be gone
+ exit 7
+ fi
+}
+
+function cleanup {
+ rmdir dir/.snap/*
+ find dir
+ rm -rf dir
+}
+
+function mount {
+ sudo --preserve-env=CEPH_CONF bin/mount.ceph :/ "$MOUNT" -o name=admin,noshare
+ df -h "$MOUNT"
+}
+
+function main {
+ eval set -- $(getopt --name "$0" --options '' --longoptions 'help,fs:,metadata-pool:,first-damage:,mount:,python:' -- "$@")
+
+ while [ "$#" -gt 0 ]; do
+ echo "$*"
+ echo "$1"
+ case "$1" in
+ -h|--help)
+ usage
+ ;;
+ --fs)
+ FS="$2"
+ shift 2
+ ;;
+ --metadata-pool)
+ METADATA_POOL="$2"
+ shift 2
+ ;;
+ --mount)
+ MOUNT="$2"
+ shift 2
+ ;;
+ --first-damage)
+ FIRST_DAMAGE="$2"
+ shift 2
+ ;;
+ --python)
+ PYTHON="$2"
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ usage
+ ;;
+ esac
+ done
+
+ mount
+
+ pushd "$MOUNT"
+ create
+ popd
+
+ sudo umount -f "$MOUNT"
+
+ # flush dentries/inodes to omap
+ flush
+
+ damage
+
+ recover
+
+ sleep 5 # for mds to join
+
+ mount
+
+ pushd "$MOUNT"
+ check
+ cleanup
+ popd
+
+ sudo umount -f "$MOUNT"
+}
+
+main "$@"
diff --git a/qa/workunits/fs/fscrypt.sh b/qa/workunits/fs/fscrypt.sh
new file mode 100755
index 000000000..ca856a62e
--- /dev/null
+++ b/qa/workunits/fs/fscrypt.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+set -xe
+
+mydir=`dirname $0`
+
+if [ $# -ne 2 ]
+then
+ echo "2 parameters are required!\n"
+ echo "Usage:"
+ echo " fscrypt.sh <type> <testdir>"
+ echo " type: should be any of 'none', 'unlocked' or 'locked'"
+ echo " testdir: the test direcotry name"
+ exit 1
+fi
+
+fscrypt=$1
+testcase=$2
+testdir=fscrypt_test_${fscrypt}_${testcase}
+mkdir $testdir
+
+XFSPROGS_DIR='xfsprogs-dev-dir'
+XFSTESTS_DIR='xfstest-dev-dir'
+export XFS_IO_PROG="$(type -P xfs_io)"
+
+# Setup the xfstests env
+setup_xfstests_env()
+{
+ git clone https://git.ceph.com/xfstests-dev.git $XFSTESTS_DIR --depth 1
+ pushd $XFSTESTS_DIR
+ . common/encrypt
+ popd
+}
+
+install_deps()
+{
+ local system_value=$(sudo lsb_release -is | awk '{print tolower($0)}')
+ case $system_value in
+ "centos" | "centosstream" | "fedora")
+ sudo yum install -y inih-devel userspace-rcu-devel \
+ libblkid-devel gettext libedit-devel \
+ libattr-devel device-mapper-devel libicu-devel
+ ;;
+ "ubuntu" | "debian")
+ sudo apt-get install -y libinih-dev liburcu-dev \
+ libblkid-dev gettext libedit-dev libattr1-dev \
+ libdevmapper-dev libicu-dev pkg-config
+ ;;
+ *)
+ echo "Unsupported distro $system_value"
+ exit 1
+ ;;
+ esac
+}
+
+# Install xfsprogs-dev from source to support "add_enckey" for xfs_io
+install_xfsprogs()
+{
+ local install_xfsprogs=0
+
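+ # probe whether the installed xfs_io already knows add_enckey; build
+ # xfsprogs from source only if it does not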
+ xfs_io -c "help add_enckey" | grep -q 'not found' && install_xfsprogs=1
+
+ if [ $install_xfsprogs -eq 1 ]; then
+ install_deps
+
+ git clone https://git.ceph.com/xfsprogs-dev.git $XFSPROGS_DIR --depth 1
+ pushd $XFSPROGS_DIR
+ make
+ sudo make install
+ popd
+ fi
+}
+
+clean_up()
+{
+ rm -rf $XFSPROGS_DIR
+ rm -rf $XFSTESTS_DIR
+ rm -rf $testdir
+}
+
+# For now, test only the V2 encryption policy, as the
+# V1 encryption policy is deprecated
+
+install_xfsprogs
+setup_xfstests_env
+
+# Generate a fixed keying identifier
+raw_key=$(_generate_raw_encryption_key)
+keyid=$(_add_enckey $testdir "$raw_key" | awk '{print $NF}')
+
+case ${fscrypt} in
+ "none")
+ # do nothing for the test directory and will test it
+ # as one non-encrypted directory.
+ pushd $testdir
+ ${mydir}/../suites/${testcase}.sh
+ popd
+ clean_up
+ ;;
+ "unlocked")
+ # set encrypt policy with the key provided and then
+ # the test directory will be encrypted & unlocked
+ _set_encpolicy $testdir $keyid
+ pushd $testdir
+ ${mydir}/../suites/${testcase}.sh
+ popd
+ clean_up
+ ;;
+ "locked")
+ # remove the key, then the test directory will be locked
+ # and any modification will be denied by requiring the key
+ _rm_enckey $testdir $keyid
+ clean_up
+ ;;
+ *)
+ clean_up
+ echo "Unknown parameter $1"
+ exit 1
+esac
diff --git a/qa/workunits/fs/full/subvolume_clone.sh b/qa/workunits/fs/full/subvolume_clone.sh
new file mode 100755
index 000000000..a11131215
--- /dev/null
+++ b/qa/workunits/fs/full/subvolume_clone.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+set -ex
+
+# This testcase tests 'ceph fs subvolume snapshot clone' when the osd is full.
+# The clone fails with 'MetadataMgrException: -28 (error in write)' and
+# truncates the config file of the corresponding subvolume while updating it.
+# Hence the subsequent subvolume commands on the clone fail with a
+# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback.
+
+# The osd is of size 1GB. The full-ratios are set so that the osd is treated
+# as full at around 600MB. A subvolume is created and 100MB is written.
+# The subvolume is snapshotted and cloned ten times. Since the clone delay is
+# set to 15 seconds, all the clones are sure to reach the pending state. Of the
+# ten clones, only a few succeed and the rest fail with ENOSPC.
+
+# At this stage, the ".meta" config file of each failed clone is checked for
+# truncation, and the clone status command is checked for a traceback.
+
+# Note that the failed clones stay in a retry loop with state 'pending' or
+# 'in-progress'. The state is not updated to 'failed' because that config
+# update also gets ENOSPC.
+
+set -e
+ignore_failure() {
+ if "$@"; then return 0; else return 0; fi
+}
+
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+NUM_CLONES=10
+
+ceph fs subvolume create cephfs sub_0
+subvol_path_0=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
+
+# For debugging
+echo "Before ratios are set"
+df $CEPH_MNT
+ceph osd df
+
+ceph osd set-full-ratio 0.6
+ceph osd set-nearfull-ratio 0.50
+ceph osd set-backfillfull-ratio 0.55
+
+# For debugging
+echo "After ratios are set"
+df -h
+ceph osd df
+
+for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done
+
+# For debugging
+echo "After subvolumes are written"
+df -h $CEPH_MNT
+ceph osd df
+
+# snapshot
+ceph fs subvolume snapshot create cephfs sub_0 snap_0
+
+# Set clone snapshot delay
+ceph config set mgr mgr/volumes/snapshot_clone_delay 15
+
+# Schedule the clones; some will fail with no space
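+# (eval is needed because brace expansion happens before the variable in
+# {1..$NUM_CLONES} is expanded)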
+for i in $(eval echo {1..$NUM_CLONES});do ceph fs subvolume snapshot clone cephfs sub_0 snap_0 clone_$i;done
+
+# Wait for osd is full
+timeout=90
+while [ $timeout -gt 0 ]
+do
+ health=$(ceph health detail)
+ [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+ echo "Wating for osd to be full: $timeout"
+ sleep 1
+ let "timeout-=1"
+done
+
+# For debugging
+echo "After osd is full"
+df -h $CEPH_MNT
+ceph osd df
+
+# Check clone status, this should not crash
+for i in $(eval echo {1..$NUM_CLONES})
+do
+ ignore_failure ceph fs clone status cephfs clone_$i >/tmp/out_${PID}_file 2>/tmp/error_${PID}_file
+ cat /tmp/error_${PID}_file
+ if grep "complete" /tmp/out_${PID}_file; then
+ echo "The clone_$i is completed"
+ else
+ # in-progress/pending clones: no traceback should be found in stderr
+ echo clone_$i in PENDING/IN-PROGRESS
+ expect_failure sudo grep "Traceback" /tmp/error_${PID}_file
+ #config file should not be truncated and GLOBAL section should be found
+ sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/clone_$i/.meta
+ fi
+done
+
+# Hard cleanup
+ignore_failure sudo rm -rf $CEPH_MNT/_index/clone/*
+ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/clone_*
+ignore_failure sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0
+ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
+
+#Set the ratios back for other full tests to run
+ceph osd set-full-ratio 0.95
+ceph osd set-nearfull-ratio 0.95
+ceph osd set-backfillfull-ratio 0.95
+
+#After test
+echo "After test"
+df -h $CEPH_MNT
+ceph osd df
+
+echo OK
diff --git a/qa/workunits/fs/full/subvolume_rm.sh b/qa/workunits/fs/full/subvolume_rm.sh
new file mode 100755
index 000000000..a464e30f5
--- /dev/null
+++ b/qa/workunits/fs/full/subvolume_rm.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+set -ex
+
+# This testcase tests the scenario of the 'ceph fs subvolume rm' mgr command
+# when the osd is full. The command used to hang. The osd is of size 1GB.
+# A subvolume is created and a 500MB file is written. The full-ratios are
+# set below 500MB so that the osd is treated as full. The subvolume is then
+# removed. This should succeed with the introduction of the FULL
+# capabilities which the mgr holds.
+
+set -e
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+ceph fs subvolume create cephfs sub_0
+subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
+
+#For debugging
+echo "Before write"
+df -h
+ceph osd df
+
+sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/500MB_file-1 status=progress bs=1M count=500
+
+ceph osd set-full-ratio 0.2
+ceph osd set-nearfull-ratio 0.16
+ceph osd set-backfillfull-ratio 0.18
+
+timeout=30
+while [ $timeout -gt 0 ]
+do
+ health=$(ceph health detail)
+ [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+ echo "Wating for osd to be full: $timeout"
+ sleep 1
+ let "timeout-=1"
+done
+
+#For debugging
+echo "After ratio set"
+df -h
+ceph osd df
+
+#Delete subvolume
+ceph fs subvolume rm cephfs sub_0
+
+#Validate subvolume is deleted
+expect_failure ceph fs subvolume info cephfs sub_0
+
+#Wait for subvolume to delete data
+trashdir=$CEPH_MNT/volumes/_deleting
+timeout=30
+while [ $timeout -gt 0 ]
+do
+ [ -z "$(sudo ls -A $trashdir)" ] && echo "Trash directory $trashdir is empty" && break
+ echo "Wating for trash dir to be empty: $timeout"
+ sleep 1
+ let "timeout-=1"
+done
+
+#Set the ratios back for other full tests to run
+ceph osd set-full-ratio 0.95
+ceph osd set-nearfull-ratio 0.95
+ceph osd set-backfillfull-ratio 0.95
+
+#After test
+echo "After test"
+df -h
+ceph osd df
+
+echo OK
diff --git a/qa/workunits/fs/full/subvolume_snapshot_rm.sh b/qa/workunits/fs/full/subvolume_snapshot_rm.sh
new file mode 100755
index 000000000..f6d0add9f
--- /dev/null
+++ b/qa/workunits/fs/full/subvolume_snapshot_rm.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+set -ex
+
+# This testcase tests 'ceph fs subvolume snapshot rm' when the osd is full.
+# The snapshot rm fails with 'MetadataMgrException: -28 (error in write)' and
+# truncates the config file of the corresponding subvolume. Hence the subsequent
+# snapshot rm of the same snapshot fails with a
+# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback.
+
+# The osd is of size 1GB. A subvolume is created and an 800MB file is written.
+# Then the full-ratios are set below 500MB so that the osd is treated as full.
+# The subvolume snapshot is taken, which succeeds as no extra space is required
+# for a snapshot. The removal of the snapshot then fails with ENOSPC as it
+# fails to remove the snapshot metadata set. The snapshot removal fails,
+# but it should not throw a traceback or truncate the config file.
+
+set -e
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+ignore_failure() {
+ if "$@"; then return 0; else return 0; fi
+}
+
+ceph fs subvolume create cephfs sub_0
+subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
+
+#For debugging
+echo "Before write"
+df $CEPH_MNT
+ceph osd df
+
+# Write an 800MB file; with a full ratio of 0.2, the 1GB osd is full at around 200MB
+ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync
+
+ceph osd set-full-ratio 0.2
+ceph osd set-nearfull-ratio 0.16
+ceph osd set-backfillfull-ratio 0.18
+
+timeout=30
+while [ $timeout -gt 0 ]
+do
+ health=$(ceph health detail)
+ [[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
+ echo "Wating for osd to be full: $timeout"
+ sleep 1
+ let "timeout-=1"
+done
+
+#Take snapshot
+ceph fs subvolume snapshot create cephfs sub_0 snap_0
+
+# Snapshot rm fails but should not throw a traceback
+expect_failure ceph fs subvolume snapshot rm cephfs sub_0 snap_0 2>/tmp/error_${PID}_file
+cat /tmp/error_${PID}_file
+
+# No traceback should be found
+expect_failure grep "Traceback" /tmp/error_${PID}_file
+
+# Validate config file is not truncated and GLOBAL section exists
+sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
+
+#For debugging
+echo "After write"
+df $CEPH_MNT
+ceph osd df
+
+# Snapshot removal with force option should succeed
+ceph fs subvolume snapshot rm cephfs sub_0 snap_0 --force
+
+#Cleanup from backend
+ignore_failure sudo rm -f /tmp/error_${PID}_file
+ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
+
+#Set the ratios back for other full tests to run
+ceph osd set-full-ratio 0.95
+ceph osd set-nearfull-ratio 0.95
+ceph osd set-backfillfull-ratio 0.95
+
+#After test
+echo "After test"
+df -h $CEPH_MNT
+ceph osd df
+
+echo OK
diff --git a/qa/workunits/fs/maxentries/maxentries.sh b/qa/workunits/fs/maxentries/maxentries.sh
new file mode 100755
index 000000000..d48fd956e
--- /dev/null
+++ b/qa/workunits/fs/maxentries/maxentries.sh
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function make_files()
+{
+ set +x
+ temp_dir=`mktemp -d mkfile_test_XXXXXX`
+ for i in $(seq 1 $1)
+ do
+ echo -n | dd of="${temp_dir}/file_$i" conv=fsync || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function make_dirs()
+{
+ set +x
+ temp_dir=`mktemp -d mkdir_test_XXXXXX`
+ for i in $(seq 1 $1)
+ do
+ mkdir -p ${temp_dir}/dir_${i} || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function make_nodes()
+{
+ set +x
+ temp_dir=`mktemp -d mknod_test_XXXXXX`
+ for i in $(seq 1 $1)
+ do
+ mknod ${temp_dir}/fifo_${i} p || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function rename_files()
+{
+ set +x
+ temp_dir=`mktemp -d rename_test_XXXXXX`
+ mkdir -p ${temp_dir}/rename
+
+ for i in $(seq 1 $1)
+ do
+ touch ${temp_dir}/file_${i} || return 1
+
+ mv ${temp_dir}/file_${i} ${temp_dir}/rename/ || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function make_symlinks()
+{
+ set +x
+ temp_dir=`mktemp -d symlink_test_XXXXXX`
+ mkdir -p ${temp_dir}/symlink
+
+ touch ${temp_dir}/file
+
+ for i in $(seq 1 $1)
+ do
+ ln -s ../file ${temp_dir}/symlink/sym_${i} || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function make_links()
+{
+ set +x
+ temp_dir=`mktemp -d link_test_XXXXXX`
+ mkdir -p ${temp_dir}/link
+
+ touch ${temp_dir}/file
+
+ for i in $(seq 1 $1)
+ do
+ ln ${temp_dir}/file ${temp_dir}/link/link_${i} || return 1
+ sync "${temp_dir}" || return 1
+ done
+ set -x
+ return 0
+}
+
+function cleanup()
+{
+ rm -rf *
+}
+
+test_dir="max_entries"
+mkdir -p $test_dir
+pushd $test_dir
+
+dir_max_entries=100
+ceph config set mds mds_dir_max_entries $dir_max_entries
+
+ok_dir_max_entries=$dir_max_entries
+fail_dir_max_entries=$((dir_max_entries+1))
+
+# make files test
+make_files $ok_dir_max_entries
+expect_false make_files $fail_dir_max_entries
+
+# make dirs test
+make_dirs $ok_dir_max_entries
+expect_false make_dirs $fail_dir_max_entries
+
+# make nodes test
+make_nodes $ok_dir_max_entries
+expect_false make_nodes $fail_dir_max_entries
+
+# rename files test
+rename_files $ok_dir_max_entries
+expect_false rename_files $fail_dir_max_entries
+
+# symlink files test
+make_symlinks $ok_dir_max_entries
+expect_false make_symlinks $fail_dir_max_entries
+
+# link files test
+make_links $ok_dir_max_entries
+expect_false make_links $fail_dir_max_entries
+
+# no limit (e.g., default value)
+dir_max_entries=0
+ceph config set mds mds_dir_max_entries $dir_max_entries
+
+make_files 500
+make_dirs 500
+make_nodes 500
+rename_files 500
+make_symlinks 500
+make_links 500
+
+cleanup
+
+popd # $test_dir
+
+echo OK
diff --git a/qa/workunits/fs/misc/acl.sh b/qa/workunits/fs/misc/acl.sh
new file mode 100755
index 000000000..198b05671
--- /dev/null
+++ b/qa/workunits/fs/misc/acl.sh
@@ -0,0 +1,50 @@
+#!/bin/sh -x
+
+set -e
+mkdir -p testdir
+cd testdir
+
+set +e
+setfacl -d -m u:nobody:rw .
+if test $? != 0; then
+ echo "Filesystem does not support ACL"
+ exit 0
+fi
+
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+set -e
+c=0
+while [ $c -lt 100 ]
+do
+ c=`expr $c + 1`
+ # inherited ACL from parent directory's default ACL
+ mkdir d1
+ c1=`getfacl d1 | grep -c "nobody:rw"`
+ echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
+ c2=`getfacl d1 | grep -c "nobody:rw"`
+ rmdir d1
+ if [ $c1 -ne 2 ] || [ $c2 -ne 2 ]
+ then
+ echo "ERROR: incorrect ACLs"
+ exit 1
+ fi
+done
+
+mkdir d1
+
+# The ACL xattr contains only the ACL header, so the ACL itself should be
+# removed in this case.
+setfattr -n system.posix_acl_access -v 0x02000000 d1
+setfattr -n system.posix_acl_default -v 0x02000000 .
+
+expect_failure getfattr -n system.posix_acl_access d1
+expect_failure getfattr -n system.posix_acl_default .
+
+
+rmdir d1
+cd ..
+rmdir testdir
+echo OK
diff --git a/qa/workunits/fs/misc/chmod.sh b/qa/workunits/fs/misc/chmod.sh
new file mode 100755
index 000000000..de66776f1
--- /dev/null
+++ b/qa/workunits/fs/misc/chmod.sh
@@ -0,0 +1,60 @@
+#!/bin/sh -x
+
+set -e
+
+check_perms() {
+
+ file=$1
+ r=$(ls -la ${file})
+ if test $? != 0; then
+ echo "ERROR: File listing/stat failed"
+ exit 1
+ fi
+
+ perms=$2
+ if test "${perms}" != $(echo ${r} | awk '{print $1}') && \
+ test "${perms}." != $(echo ${r} | awk '{print $1}') && \
+ test "${perms}+" != $(echo ${r} | awk '{print $1}'); then
+ echo "ERROR: Permissions should be ${perms}"
+ exit 1
+ fi
+}
+
+file=test_chmod.$$
+
+echo "foo" > ${file}
+if test $? != 0; then
+ echo "ERROR: Failed to create file ${file}"
+ exit 1
+fi
+
+chmod 400 ${file}
+if test $? != 0; then
+ echo "ERROR: Failed to change mode of ${file}"
+ exit 1
+fi
+
+check_perms ${file} "-r--------"
+
+set +e
+echo "bar" >> ${file}
+if test $? = 0; then
+ echo "ERROR: Write to read-only file should Fail"
+ exit 1
+fi
+
+set -e
+chmod 600 ${file}
+echo "bar" >> ${file}
+if test $? != 0; then
+ echo "ERROR: Write to writeable file failed"
+ exit 1
+fi
+
+check_perms ${file} "-rw-------"
+
+echo "foo" >> ${file}
+if test $? != 0; then
+ echo "ERROR: Failed to write to file"
+ exit 1
+fi
diff --git a/qa/workunits/fs/misc/dac_override.sh b/qa/workunits/fs/misc/dac_override.sh
new file mode 100755
index 000000000..dfb1a9091
--- /dev/null
+++ b/qa/workunits/fs/misc/dac_override.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -x
+
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+set -e
+
+mkdir -p testdir
+file=test_chmod.$$
+
+echo "foo" > testdir/${file}
+sudo chmod 600 testdir
+
+# only root can read
+expect_failure cat testdir/${file}
+
+# directory read/write DAC override for root should allow read
+sudo cat testdir/${file}
diff --git a/qa/workunits/fs/misc/direct_io.py b/qa/workunits/fs/misc/direct_io.py
new file mode 100755
index 000000000..f7d59d95a
--- /dev/null
+++ b/qa/workunits/fs/misc/direct_io.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python3
+
+import mmap
+import os
+import subprocess
+
+def main():
+ path = "testfile"
+ fd = os.open(path, os.O_RDWR | os.O_CREAT | os.O_TRUNC | os.O_DIRECT, 0o644)
+
+ ino = os.fstat(fd).st_ino
+ obj_name = "{ino:x}.00000000".format(ino=ino)
+ pool_name = os.getxattr(path, "ceph.file.layout.pool")
+
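+ # O_DIRECT requires an aligned buffer; an anonymous mmap is page-aligned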
+ buf = mmap.mmap(-1, 1)
+ buf.write(b'1')
+ os.write(fd, buf)
+
+ proc = subprocess.Popen(['rados', '-p', pool_name, 'get', obj_name, 'tmpfile'])
+ proc.wait()
+
+ with open('tmpfile', 'rb') as tmpf:
+ out = tmpf.read(1)
+ if out != b'1':
+ raise RuntimeError("data were not written to object store directly")
+
+ with open('tmpfile', 'wb') as tmpf:
+ tmpf.write(b'2')
+
+ proc = subprocess.Popen(['rados', '-p', pool_name, 'put', obj_name, 'tmpfile'])
+ proc.wait()
+
+ os.lseek(fd, 0, os.SEEK_SET)
+ out = os.read(fd, 1)
+ if out != b'2':
+ raise RuntimeError("data were not directly read from object store")
+
+ os.close(fd)
+ print('ok')
+
+
+main()
diff --git a/qa/workunits/fs/misc/dirfrag.sh b/qa/workunits/fs/misc/dirfrag.sh
new file mode 100755
index 000000000..eea0ec3bc
--- /dev/null
+++ b/qa/workunits/fs/misc/dirfrag.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+set -e
+
+DEPTH=5
+COUNT=10000
+
+kill_jobs() {
+ jobs -p | xargs kill
+}
+trap kill_jobs INT
+
+create_files() {
+ for i in `seq 1 $COUNT`
+ do
+ touch file$i
+ done
+}
+
+delete_files() {
+ for i in `ls -f`
+ do
+ if [[ ${i}a = file*a ]]
+ then
+ rm -f $i
+ fi
+ done
+}
+
+rm -rf testdir
+mkdir testdir
+cd testdir
+
+echo "creating folder hierarchy"
+for i in `seq 1 $DEPTH`; do
+ mkdir dir$i
+ cd dir$i
+ create_files &
+done
+wait
+
+echo "created hierarchy, now cleaning up"
+
+for i in `seq 1 $DEPTH`; do
+ delete_files &
+ cd ..
+done
+wait
+
+echo "cleaned up hierarchy"
+cd ..
+rm -rf testdir
diff --git a/qa/workunits/fs/misc/filelock_deadlock.py b/qa/workunits/fs/misc/filelock_deadlock.py
new file mode 100755
index 000000000..398902f6c
--- /dev/null
+++ b/qa/workunits/fs/misc/filelock_deadlock.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python3
+
+import errno
+import fcntl
+import os
+import signal
+import struct
+import time
+
+
+def handler(signum, frame):
+ pass
+
+
+def lock_two(f1, f2):
+ lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
+ fcntl.fcntl(f1, fcntl.F_SETLKW, lockdata)
+ time.sleep(10)
+
+ # don't wait forever
+ signal.signal(signal.SIGALRM, handler)
+ signal.alarm(10)
+ exitcode = 0
+ try:
+ fcntl.fcntl(f2, fcntl.F_SETLKW, lockdata)
+ except IOError as e:
+ if e.errno == errno.EDEADLK:
+ exitcode = 1
+ elif e.errno == errno.EINTR:
+ exitcode = 2
+ else:
+ exitcode = 3
+ os._exit(exitcode)
+
+
+def main():
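+ # fork three children that lock files in a cycle (1->2, 2->3, 3->1); the
+ # kernel's deadlock detection should fail exactly one F_SETLKW with EDEADLK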
+ pid1 = os.fork()
+ if pid1 == 0:
+ f1 = open("testfile1", 'w')
+ f2 = open("testfile2", 'w')
+ lock_two(f1, f2)
+
+ pid2 = os.fork()
+ if pid2 == 0:
+ f1 = open("testfile2", 'w')
+ f2 = open("testfile3", 'w')
+ lock_two(f1, f2)
+
+ pid3 = os.fork()
+ if pid3 == 0:
+ f1 = open("testfile3", 'w')
+ f2 = open("testfile1", 'w')
+ lock_two(f1, f2)
+
+ deadlk_count = 0
+ i = 0
+ while i < 3:
+ pid, status = os.wait()
+ exitcode = status >> 8
+ if exitcode == 1:
+ deadlk_count += 1
+ elif exitcode != 0:
+ raise RuntimeError("unexpect exit code of child")
+ i += 1
+
+ if deadlk_count != 1:
+ raise RuntimeError("unexpect count of EDEADLK")
+
+ print('ok')
+
+
+main()
diff --git a/qa/workunits/fs/misc/filelock_interrupt.py b/qa/workunits/fs/misc/filelock_interrupt.py
new file mode 100755
index 000000000..b261d74fb
--- /dev/null
+++ b/qa/workunits/fs/misc/filelock_interrupt.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python3
+
+from contextlib import contextmanager
+import errno
+import fcntl
+import signal
+import struct
+
+@contextmanager
+def timeout(seconds):
+ def timeout_handler(signum, frame):
+ raise InterruptedError
+
+ orig_handler = signal.signal(signal.SIGALRM, timeout_handler)
+ try:
+ signal.alarm(seconds)
+ yield
+ finally:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, orig_handler)
+
+
+"""
+introduced by Linux 3.15
+"""
+setattr(fcntl, "F_OFD_GETLK", 36)
+setattr(fcntl, "F_OFD_SETLK", 37)
+setattr(fcntl, "F_OFD_SETLKW", 38)
+
+
+def main():
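+ # two opens of the same file in one process: flock and OFD locks are
+ # per open file description, so f1 and f2 can conflict with each other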
+ f1 = open("testfile", 'w')
+ f2 = open("testfile", 'w')
+
+ fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB)
+
+ """
+ is flock interruptible?
+ """
+ with timeout(5):
+ try:
+ fcntl.flock(f2, fcntl.LOCK_EX)
+ except InterruptedError:
+ pass
+ else:
+ raise RuntimeError("expect flock to block")
+
+ fcntl.flock(f1, fcntl.LOCK_UN)
+
+ lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
+ try:
+ fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
+ except IOError as e:
+ if e.errno != errno.EINVAL:
+ raise
+ else:
+ print('kernel does not support fcntl.F_OFD_SETLK')
+ return
+
+ lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
+ fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
+
+ """
+ is posix lock interruptible?
+ """
+ with timeout(5):
+ try:
+ lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
+ fcntl.fcntl(f2, fcntl.F_OFD_SETLKW, lockdata)
+ except InterruptedError:
+ pass
+ else:
+ raise RuntimeError("expect posix lock to block")
+
+ """
+ file handler 2 should still hold lock on 10~10
+ """
+ try:
+ lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
+ fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
+ # reaching here means the conflicting lock was granted, which is wrong
+ raise RuntimeError("expect file handle f2 to hold the lock on 10~10")
+ except IOError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+
+ lockdata = struct.pack('hhllhh', fcntl.F_UNLCK, 0, 0, 0, 0, 0)
+ fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
+ fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
+
+ print('ok')
+
+
+main()
diff --git a/qa/workunits/fs/misc/i_complete_vs_rename.sh b/qa/workunits/fs/misc/i_complete_vs_rename.sh
new file mode 100755
index 000000000..a9b98271d
--- /dev/null
+++ b/qa/workunits/fs/misc/i_complete_vs_rename.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+set -e
+
+mkdir x
+cd x
+touch a
+touch b
+touch c
+touch d
+ls
+chmod 777 .
+stat e || true
+touch f
+touch g
+
+# over existing file
+echo attempting rename over existing file...
+touch ../xx
+mv ../xx f
+ls | grep f || false
+echo rename over existing file is okay
+
+# over negative dentry
+echo attempting rename over negative dentry...
+touch ../xx
+mv ../xx e
+ls | grep e || false
+echo rename over negative dentry is okay
+
+echo OK
diff --git a/qa/workunits/fs/misc/layout_vxattrs.sh b/qa/workunits/fs/misc/layout_vxattrs.sh
new file mode 100755
index 000000000..811336273
--- /dev/null
+++ b/qa/workunits/fs/misc/layout_vxattrs.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+
+set -ex
+
+# detect data pool
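+# (walk up the tree until a directory with an explicit layout is found; the
+# test mount's root is expected to carry one)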
+datapool=
+dir=.
+while true ; do
+ echo $dir
+ datapool=$(getfattr -n ceph.dir.layout.pool $dir --only-values) && break
+ dir=$dir/..
+done
+
+# file
+rm -f file file2
+touch file file2
+
+getfattr -n ceph.file.layout file
+getfattr -n ceph.file.layout file | grep -q object_size=
+getfattr -n ceph.file.layout file | grep -q stripe_count=
+getfattr -n ceph.file.layout file | grep -q stripe_unit=
+getfattr -n ceph.file.layout file | grep -q pool=
+getfattr -n ceph.file.layout.pool file
+getfattr -n ceph.file.layout.pool_namespace file
+getfattr -n ceph.file.layout.stripe_unit file
+getfattr -n ceph.file.layout.stripe_count file
+getfattr -n ceph.file.layout.object_size file
+
+getfattr -n ceph.file.layout.bogus file 2>&1 | grep -q 'No such attribute'
+getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute'
+
+setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2
+setfattr -n ceph.file.layout.stripe_count -v 8 file2
+setfattr -n ceph.file.layout.object_size -v 10485760 file2
+
+setfattr -n ceph.file.layout.pool -v $datapool file2
+getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
+setfattr -n ceph.file.layout.pool_namespace -v foons file2
+getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
+setfattr -x ceph.file.layout.pool_namespace file2
+getfattr -n ceph.file.layout.pool_namespace file2 | grep -q -v foons
+
+getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
+getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760
+
+setfattr -n ceph.file.layout -v "stripe_unit=4194304 stripe_count=16 object_size=41943040 pool=$datapool pool_namespace=foons" file2
+getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 4194304
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
+getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
+getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
+getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
+
+setfattr -n ceph.file.layout -v "stripe_unit=1048576" file2
+getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
+getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
+getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
+getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
+
+setfattr -n ceph.file.layout -v "stripe_unit=2097152 stripe_count=4 object_size=2097152 pool=$datapool pool_namespace=barns" file2
+getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 2097152
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 4
+getfattr -n ceph.file.layout.object_size file2 | grep -q 2097152
+getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
+getfattr -n ceph.file.layout.pool_namespace file2 | grep -q barns
+
+# dir
+rm -f dir/file || true
+rmdir dir || true
+mkdir -p dir
+
+getfattr -d -m - dir | grep -q ceph.dir.layout && exit 1 || true
+getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true
+getfattr -n ceph.dir.layout dir && exit 1 || true
+
+setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir
+setfattr -n ceph.dir.layout.stripe_count -v 8 dir
+setfattr -n ceph.dir.layout.object_size -v 10485760 dir
+setfattr -n ceph.dir.layout.pool -v $datapool dir
+setfattr -n ceph.dir.layout.pool_namespace -v dirns dir
+
+getfattr -n ceph.dir.layout dir
+getfattr -n ceph.dir.layout dir | grep -q object_size=10485760
+getfattr -n ceph.dir.layout dir | grep -q stripe_count=8
+getfattr -n ceph.dir.layout dir | grep -q stripe_unit=1048576
+getfattr -n ceph.dir.layout dir | grep -q pool=$datapool
+getfattr -n ceph.dir.layout dir | grep -q pool_namespace=dirns
+getfattr -n ceph.dir.layout.pool dir | grep -q $datapool
+getfattr -n ceph.dir.layout.stripe_unit dir | grep -q 1048576
+getfattr -n ceph.dir.layout.stripe_count dir | grep -q 8
+getfattr -n ceph.dir.layout.object_size dir | grep -q 10485760
+getfattr -n ceph.dir.layout.pool_namespace dir | grep -q dirns
+
+
+setfattr -n ceph.file.layout -v "stripe_count=16" file2
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
+setfattr -n ceph.file.layout -v "object_size=10485760 stripe_count=8 stripe_unit=1048576 pool=$datapool pool_namespace=dirns" file2
+getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
+
+touch dir/file
+getfattr -n ceph.file.layout.pool dir/file | grep -q $datapool
+getfattr -n ceph.file.layout.stripe_unit dir/file | grep -q 1048576
+getfattr -n ceph.file.layout.stripe_count dir/file | grep -q 8
+getfattr -n ceph.file.layout.object_size dir/file | grep -q 10485760
+getfattr -n ceph.file.layout.pool_namespace dir/file | grep -q dirns
+
+setfattr -x ceph.dir.layout.pool_namespace dir
+getfattr -n ceph.dir.layout dir | grep -q -v pool_namespace=dirns
+
+setfattr -x ceph.dir.layout dir
+getfattr -n ceph.dir.layout dir 2>&1 | grep -q 'No such attribute'
+
+echo OK
+
diff --git a/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh b/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
new file mode 100755
index 000000000..6b2fecbc0
--- /dev/null
+++ b/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -e
+
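+# a file layout may only reference a pool that is registered as a CephFS
+# data pool, hence the add_data_pool step before setfattr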
+touch foo.$$
+ceph osd pool create foo.$$ 8
+ceph fs add_data_pool cephfs foo.$$
+setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
+
+# cleanup
+rm foo.$$
+ceph fs rm_data_pool cephfs foo.$$
+ceph osd pool rm foo.$$ foo.$$ --yes-i-really-really-mean-it
+
+echo OK
diff --git a/qa/workunits/fs/misc/multiple_rsync.sh b/qa/workunits/fs/misc/multiple_rsync.sh
new file mode 100755
index 000000000..4397c1e7f
--- /dev/null
+++ b/qa/workunits/fs/misc/multiple_rsync.sh
@@ -0,0 +1,25 @@
+#!/bin/sh -ex
+
+
+# Populate with some arbitrary files from the local system. Take
+# a copy to protect against false fails from system updates during test.
+export PAYLOAD=/tmp/multiple_rsync_payload.$$
+sudo cp -r /usr/lib/ $PAYLOAD
+
+set -e
+
+sudo rsync -av $PAYLOAD payload.1
+sudo rsync -av $PAYLOAD payload.2
+
+# this shouldn't transfer any additional files
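+# (an rsync -v run that copies nothing prints a 4-line summary: the
+# "sending incremental file list" header, a blank line, and two totals lines)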
+echo we should get 4 here if no additional files are transferred
+sudo rsync -auv $PAYLOAD payload.1 | tee /tmp/$$
+hexdump -C /tmp/$$
+wc -l /tmp/$$ | grep 4
+sudo rsync -auv $PAYLOAD payload.2 | tee /tmp/$$
+hexdump -C /tmp/$$
+wc -l /tmp/$$ | grep 4
+echo OK
+
+rm /tmp/$$
+sudo rm -rf $PAYLOAD
diff --git a/qa/workunits/fs/misc/rstats.sh b/qa/workunits/fs/misc/rstats.sh
new file mode 100755
index 000000000..e6b3eddf2
--- /dev/null
+++ b/qa/workunits/fs/misc/rstats.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+set -x
+
+timeout=30
+old_value=""
+new_value=""
+
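+# recursive stats propagate up the tree asynchronously, so poll for a change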
+wait_until_changed() {
+ name=$1
+ wait=0
+ while [ $wait -lt $timeout ]; do
+ new_value=`getfattr --only-value -n ceph.dir.$name .`
+ [ $new_value == $old_value ] || return 0
+ sleep 1
+ wait=$(($wait + 1))
+ done
+ return 1
+}
+
+check_rctime() {
+ old_sec=$(echo $old_value | cut -d. -f1)
+ old_nsec=$(echo $old_value | cut -d. -f2)
+ new_sec=$(echo $new_value | cut -d. -f1)
+ new_nsec=$(echo $new_value | cut -d. -f2)
+ [ "$old_sec" -lt "$new_sec" ] && return 0
+ [ "$old_sec" -gt "$new_sec" ] && return 1
+ [ "$old_nsec" -lt "$new_nsec" ] && return 0
+ return 1
+}
+
+# sync(3) does not make ceph-fuse flush dirty caps, because the fuse kernel
+# module does not notify ceph-fuse about it. Use fsync(3) instead.
+fsync_path() {
+ cmd="import os; fd=os.open(\"$1\", os.O_RDONLY); os.fsync(fd); os.close(fd)"
+ python3 -c "$cmd"
+}
+
+set -e
+
+mkdir -p rstats_testdir/d1/d2
+cd rstats_testdir
+
+# rfiles
+old_value=`getfattr --only-value -n ceph.dir.rfiles .`
+[ $old_value == 0 ] || false
+touch d1/d2/f1
+wait_until_changed rfiles
+[ $new_value == $(($old_value + 1)) ] || false
+
+# rsubdirs
+old_value=`getfattr --only-value -n ceph.dir.rsubdirs .`
+[ $old_value == 3 ] || false
+mkdir d1/d2/d3
+wait_until_changed rsubdirs
+[ $new_value == $(($old_value + 1)) ] || false
+
+# rbytes
+old_value=`getfattr --only-value -n ceph.dir.rbytes .`
+[ $old_value == 0 ] || false
+echo hello > d1/d2/f2
+fsync_path d1/d2/f2
+wait_until_changed rbytes
+[ $new_value == $(($old_value + 6)) ] || false
+
+#rctime
+old_value=`getfattr --only-value -n ceph.dir.rctime .`
+touch d1/d2/d3 # touch existing file
+fsync_path d1/d2/d3
+wait_until_changed rctime
+check_rctime
+
+old_value=`getfattr --only-value -n ceph.dir.rctime .`
+touch d1/d2/f3 # create new file
+wait_until_changed rctime
+check_rctime
+
+cd ..
+rm -rf rstats_testdir
+echo OK
diff --git a/qa/workunits/fs/misc/trivial_sync.sh b/qa/workunits/fs/misc/trivial_sync.sh
new file mode 100755
index 000000000..7c8c4e2b4
--- /dev/null
+++ b/qa/workunits/fs/misc/trivial_sync.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -e
+
+mkdir foo
+echo foo > bar
+sync
diff --git a/qa/workunits/fs/misc/xattrs.sh b/qa/workunits/fs/misc/xattrs.sh
new file mode 100755
index 000000000..fcd94d22c
--- /dev/null
+++ b/qa/workunits/fs/misc/xattrs.sh
@@ -0,0 +1,14 @@
+#!/bin/sh -x
+
+set -e
+
+touch file
+
+setfattr -n user.foo -v foo file
+setfattr -n user.bar -v bar file
+setfattr -n user.empty file
+getfattr -d file | grep foo
+getfattr -d file | grep bar
+getfattr -d file | grep empty
+
+echo OK.
diff --git a/qa/workunits/fs/multiclient_sync_read_eof.py b/qa/workunits/fs/multiclient_sync_read_eof.py
new file mode 100755
index 000000000..15ecbb825
--- /dev/null
+++ b/qa/workunits/fs/multiclient_sync_read_eof.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python3
+
+import argparse
+import os
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('mnt1')
+ parser.add_argument('mnt2')
+ parser.add_argument('fn')
+ args = parser.parse_args()
+
+ open(os.path.join(args.mnt1, args.fn), 'w')
+ f1 = open(os.path.join(args.mnt1, args.fn), 'r+')
+ f2 = open(os.path.join(args.mnt2, args.fn), 'r+')
+
+ f1.write('foo')
+ f1.flush()
+ a = f2.read(3)
+ print('got "%s"' % a)
+ assert a == 'foo'
+ f2.write('bar')
+ f2.flush()
+ a = f1.read(3)
+ print('got "%s"' % a)
+ assert a == 'bar'
+
+ ## test short reads
+ f1.write('short')
+ f1.flush()
+ a = f2.read(100)
+ print('got "%s"' % a)
+ assert a == 'short'
+ f2.write('longer')
+ f2.flush()
+ a = f1.read(1000)
+ print('got "%s"' % a)
+ assert a == 'longer'
+
+ print('ok')
+
+main()
diff --git a/qa/workunits/fs/norstats/kernel_untar_tar.sh b/qa/workunits/fs/norstats/kernel_untar_tar.sh
new file mode 100755
index 000000000..6a175dcd9
--- /dev/null
+++ b/qa/workunits/fs/norstats/kernel_untar_tar.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# check whether any file changes while it is being archived
+
+set -e
+
+KERNEL=linux-4.0.5
+
+wget -q http://download.ceph.com/qa/$KERNEL.tar.xz
+
+mkdir untar_tar
+cd untar_tar
+
+tar Jxvf ../$KERNEL.tar.xz $KERNEL/Documentation/
+tar cf doc.tar $KERNEL
+
+tar xf doc.tar
+sync
+tar c $KERNEL >/dev/null
+
+rm -rf $KERNEL
+
+tar xf doc.tar
+sync
+tar c $KERNEL >/dev/null
+
+echo Ok
diff --git a/qa/workunits/fs/quota/quota.sh b/qa/workunits/fs/quota/quota.sh
new file mode 100755
index 000000000..1315be6d8
--- /dev/null
+++ b/qa/workunits/fs/quota/quota.sh
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function write_file()
+{
+ set +x
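+ # append 1MB at a time; CephFS quota enforcement is not immediate, so the
+ # loop is expected to fail only once the client learns it is over quota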
+ for ((i=1;i<=$2;i++))
+ do
+ dd if=/dev/zero of=$1 bs=1M count=1 conv=notrunc oflag=append 2>/dev/null >/dev/null
+ if [ $? != 0 ]; then
+ echo Try to write $(($i * 1048576))
+ set -x
+ return 1
+ fi
+ sleep 0.05
+ done
+ set -x
+ return 0
+}
+
+mkdir quota-test
+cd quota-test
+
+# bytes
+setfattr . -n ceph.quota.max_bytes -v 100000000 # 100m
+expect_false write_file big 1000 # 1g
+expect_false write_file second 10
+setfattr . -n ceph.quota.max_bytes -v 0
+dd if=/dev/zero of=third bs=1M count=10
+dd if=/dev/zero of=big2 bs=1M count=100
+
+
+rm -rf *
+
+# files
+setfattr . -n ceph.quota.max_files -v 5
+mkdir ok
+touch ok/1
+touch ok/2
+touch 3
+expect_false touch shouldbefail # 5 files will include the "."
+expect_false touch ok/shouldbefail # 5 files will include the "."
+setfattr . -n ceph.quota.max_files -v 0
+touch shouldbecreated
+touch shouldbecreated2
+
+
+rm -rf *
+
+# mix
+mkdir bytes bytes/files
+
+setfattr bytes -n ceph.quota.max_bytes -v 10000000 #10m
+setfattr bytes/files -n ceph.quota.max_files -v 5
+dd if=/dev/zero of=bytes/files/1 bs=1M count=4
+dd if=/dev/zero of=bytes/files/2 bs=1M count=4
+expect_false write_file bytes/files/3 1000
+expect_false write_file bytes/files/4 1000
+expect_false write_file bytes/files/5 1000
+stat --printf="%n %s\n" bytes/files/1 #4M
+stat --printf="%n %s\n" bytes/files/2 #4M
+stat --printf="%n %s\n" bytes/files/3 #bigger than 2M
+stat --printf="%n %s\n" bytes/files/4 #should be zero
+expect_false stat bytes/files/5 #shouldn't exist
+
+
+
+
+rm -rf *
+
+#mv
+mkdir files limit
+truncate files/file -s 10G
+setfattr limit -n ceph.quota.max_bytes -v 1000000 #1m
+expect_false mv files limit/
+
+
+
+rm -rf *
+
+#limit by ancestor
+
+mkdir -p ancestor/p1/p2/parent/p3
+setfattr ancestor -n ceph.quota.max_bytes -v 1000000
+setfattr ancestor/p1/p2/parent -n ceph.quota.max_bytes -v 1000000000 #1g
+expect_false write_file ancestor/p1/p2/parent/p3/file1 900 #900m
+stat --printf="%n %s\n" ancestor/p1/p2/parent/p3/file1
+
+
+#get/set attribute
+
+setfattr -n ceph.quota.max_bytes -v 0 .
+setfattr -n ceph.quota.max_bytes -v 1 .
+setfattr -n ceph.quota.max_bytes -v 9223372036854775807 .
+expect_false setfattr -n ceph.quota.max_bytes -v 9223372036854775808 .
+expect_false setfattr -n ceph.quota.max_bytes -v -1 .
+expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775808 .
+expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775809 .
+
+setfattr -n ceph.quota.max_files -v 0 .
+setfattr -n ceph.quota.max_files -v 1 .
+setfattr -n ceph.quota.max_files -v 9223372036854775807 .
+expect_false setfattr -n ceph.quota.max_files -v 9223372036854775808 .
+expect_false setfattr -n ceph.quota.max_files -v -1 .
+expect_false setfattr -n ceph.quota.max_files -v -9223372036854775808 .
+expect_false setfattr -n ceph.quota.max_files -v -9223372036854775809 .
+
+setfattr -n ceph.quota -v "max_bytes=0 max_files=0" .
+setfattr -n ceph.quota -v "max_bytes=1 max_files=0" .
+setfattr -n ceph.quota -v "max_bytes=0 max_files=1" .
+setfattr -n ceph.quota -v "max_bytes=1 max_files=1" .
+expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=0" .
+expect_false setfattr -n ceph.quota -v "max_bytes=0 max_files=-1" .
+expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=-1" .
+
+#addme
+
+cd ..
+rm -rf quota-test
+
+echo OK
diff --git a/qa/workunits/fs/snap-hierarchy.sh b/qa/workunits/fs/snap-hierarchy.sh
new file mode 100755
index 000000000..67f0e014b
--- /dev/null
+++ b/qa/workunits/fs/snap-hierarchy.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+set -ex
+
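+# When VERIFY=verify, creation steps are skipped and the script only checks
+# that snapshots made by a previous run are still intact.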
+if [ -d "$1" ]; then
+ mkdir -p -- "$1" && cd "$1"
+fi
+
+[ "$VERIFY" != verify ] && mkdir 1
+[ "$VERIFY" != verify ] && mkdir 1/.snap/first
+stat 1/.snap/first
+[ "$VERIFY" != verify ] && mkdir 1/2
+stat 1/.snap/first/2 && exit 1
+[ "$VERIFY" != verify ] && mkdir 1/2/.snap/second
+stat 1/2/.snap/second
+[ "$VERIFY" != verify ] && touch 1/foo
+stat 1/.snap/first/foo && exit 1
+[ "$VERIFY" != verify ] && mkdir 1/.snap/third
+stat 1/.snap/third/foo || exit 1
+[ "$VERIFY" != verify ] && mkdir 1/2/3
+[ "$VERIFY" != verify ] && mkdir 1/2/.snap/fourth
+stat 1/2/.snap/fourth/3
+
+exit 0
diff --git a/qa/workunits/fs/snaps/snap-rm-diff.sh b/qa/workunits/fs/snaps/snap-rm-diff.sh
new file mode 100755
index 000000000..30ffa9113
--- /dev/null
+++ b/qa/workunits/fs/snaps/snap-rm-diff.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -ex
+
+wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2
+mkdir foo
+cp linux* foo
+mkdir foo/.snap/barsnap
+rm foo/linux*
+diff -q foo/.snap/barsnap/linux* linux* && echo "passed: files are identical"
+rmdir foo/.snap/barsnap
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-1.sh b/qa/workunits/fs/snaps/snaptest-1.sh
new file mode 100755
index 000000000..431e83387
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-1.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+set -ex
+
+echo 1 > file1
+echo 2 > file2
+echo 3 > file3
+[ -e file4 ] && rm file4
+mkdir .snap/snap1
+echo 4 > file4
+now=`ls`
+then=`ls .snap/snap1`
+rmdir .snap/snap1
+if [ "$now" = "$then" ]; then
+ echo live and snap contents are identical?
+ false
+fi
+
+# do it again
+echo 1 > file1
+echo 2 > file2
+echo 3 > file3
+mkdir .snap/snap1
+echo 4 > file4
+rmdir .snap/snap1
+
+rm file?
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-2.sh b/qa/workunits/fs/snaps/snaptest-2.sh
new file mode 100755
index 000000000..11fe9316a
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-2.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+echo "Create dir 100 to 199 ..."
+for i in $(seq 100 199); do
+ echo " create dir $i"
+ mkdir "$i"
+ for y in $(seq 10 20); do
+ echo "This is a test file before any snapshot was taken." >"$i/$y"
+ done
+done
+
+echo "Take first snapshot .snap/test1"
+mkdir .snap/test1
+
+echo "Create dir 200 to 299 ..."
+for i in $(seq 200 299); do
+ echo " create dir $i"
+ mkdir $i
+ for y in $(seq 20 29); do
+ echo "This is a test file. Created after .snap/test1" >"$i/$y"
+ done
+done
+
+echo "Create a snapshot in every first level dir ..."
+for dir in $(ls); do
+ echo " create $dir/.snap/snap-subdir-test"
+ mkdir "$dir/.snap/snap-subdir-test"
+ for y in $(seq 30 39); do
+ echo " create $dir/$y file after the snapshot"
+ echo "This is a test file. Created after $dir/.snap/snap-subdir-test" >"$dir/$y"
+ done
+done
+
+echo "Take second snapshot .snap/test2"
+mkdir .snap/test2
+
+echo "Copy content of .snap/test1 to copyofsnap1 ..."
+mkdir copyofsnap1
+cp -Rv .snap/test1 copyofsnap1/
+
+
+echo "Take third snapshot .snap/test3"
+mkdir .snap/test3
+
+echo "Delete the snapshots..."
+
+find ./ -type d -print | \
+ xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
+ \( ! -name "_*" \) -print 2>/dev/null
+
+find ./ -type d -print | \
+ xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
+ \( ! -name "_*" \) -print 2>/dev/null | \
+ xargs -n1 rmdir
+
+echo "Delete all the files and directories ..."
+rm -Rfv ./*
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-authwb.sh b/qa/workunits/fs/snaps/snaptest-authwb.sh
new file mode 100755
index 000000000..965ee8512
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-authwb.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -x
+
+set -e
+
+touch foo
+chmod +x foo
+mkdir .snap/s
+find .snap/s/foo -executable | grep foo
+rmdir .snap/s
+rm foo
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-capwb.sh b/qa/workunits/fs/snaps/snaptest-capwb.sh
new file mode 100755
index 000000000..d26f324b6
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-capwb.sh
@@ -0,0 +1,33 @@
+#!/bin/sh -x
+
+set -e
+
+mkdir foo
+
+# make sure mds handles it when the client does not send flushsnap
+echo x > foo/x
+sync
+mkdir foo/.snap/ss
+ln foo/x foo/xx
+cat foo/.snap/ss/x
+rmdir foo/.snap/ss
+
+#
+echo a > foo/a
+echo b > foo/b
+mkdir foo/.snap/s
+r=`cat foo/.snap/s/a`
+[ -z "$r" ] && echo "a appears empty in snapshot" && false
+
+ln foo/b foo/b2
+cat foo/.snap/s/b
+
+echo "this used to hang:"
+echo more >> foo/b2
+echo "oh, it didn't hang! good job."
+cat foo/b
+rmdir foo/.snap/s
+
+rm -r foo
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-dir-rename.sh b/qa/workunits/fs/snaps/snaptest-dir-rename.sh
new file mode 100755
index 000000000..3bbd9a11e
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-dir-rename.sh
@@ -0,0 +1,17 @@
+#!/bin/sh -x
+
+set -e
+
+#
+# make sure we keep an existing dn's seq
+#
+
+mkdir a
+mkdir .snap/bar
+mkdir a/.snap/foo
+rmdir a/.snap/foo
+rmdir a
+stat .snap/bar/a
+rmdir .snap/bar
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-double-null.sh b/qa/workunits/fs/snaps/snaptest-double-null.sh
new file mode 100755
index 000000000..cdf32e4f0
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-double-null.sh
@@ -0,0 +1,23 @@
+#!/bin/sh -x
+
+set -e
+
+# multiple intervening snapshots with no modifications, and thus no
+# snapflush client_caps messages. make sure the mds can handle this.
+
+for f in `seq 1 20` ; do
+ mkdir a
+ cat > a/foo &
+ mkdir a/.snap/one
+ mkdir a/.snap/two
+ chmod 777 a/foo
+ sync # this might crash the mds
+ ps
+ rmdir a/.snap/*
+ rm a/foo
+ rmdir a
+done
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-estale.sh b/qa/workunits/fs/snaps/snaptest-estale.sh
new file mode 100755
index 000000000..a4fb94368
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-estale.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+mkdir .snap/foo
+
+echo "We want ENOENT, not ESTALE, here."
+for f in `seq 1 100`
+do
+ stat .snap/foo/$f 2>&1 | grep 'No such file'
+done
+
+rmdir .snap/foo
+
+echo "OK"
diff --git a/qa/workunits/fs/snaps/snaptest-git-ceph.sh b/qa/workunits/fs/snaps/snaptest-git-ceph.sh
new file mode 100755
index 000000000..12c1f0fdc
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-git-ceph.sh
@@ -0,0 +1,52 @@
+#!/bin/sh -x
+
+set -e
+
+# if the first clone is too slow and times out, retry once with a doubled timeout
+trap -- 'retry' EXIT
+retry() {
+ rm -rf ceph
+ # double the timeout value
+ timeout 3600 git clone https://git.ceph.com/ceph.git
+}
+rm -rf ceph
+timeout 1800 git clone https://git.ceph.com/ceph.git
+trap - EXIT
+cd ceph
+
+versions=`seq 1 90`
+
+for v in $versions
+do
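+ # v0.48 is skipped: that release appears to be tagged differently
+ # upstream (v0.48argonaut), so a plain v0.48 tag is not expected to exist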
+ if [ $v -eq 48 ]; then
+ continue
+ fi
+ ver="v0.$v"
+ echo $ver
+ git reset --hard $ver
+ mkdir .snap/$ver
+done
+
+for v in $versions
+do
+ if [ $v -eq 48 ]; then
+ continue
+ fi
+ ver="v0.$v"
+ echo checking $ver
+ cd .snap/$ver
+ git diff --exit-code
+ cd ../..
+done
+
+for v in $versions
+do
+ if [ $v -eq 48 ]; then
+ continue
+ fi
+ ver="v0.$v"
+ rmdir .snap/$ver
+done
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-hardlink.sh b/qa/workunits/fs/snaps/snaptest-hardlink.sh
new file mode 100755
index 000000000..90f3583b1
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-hardlink.sh
@@ -0,0 +1,25 @@
+#!/bin/sh -x
+
+set -e
+
+mkdir 1 2
+echo asdf >1/file1
+echo asdf >1/file2
+
+ln 1/file1 2/file1
+ln 1/file2 2/file2
+
+mkdir 2/.snap/s1
+
+echo qwer >1/file1
+grep asdf 2/.snap/s1/file1
+
+rm -f 1/file2
+grep asdf 2/.snap/s1/file2
+rm -f 2/file2
+grep asdf 2/.snap/s1/file2
+
+rmdir 2/.snap/s1
+rm -rf 1 2
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-intodir.sh b/qa/workunits/fs/snaps/snaptest-intodir.sh
new file mode 100755
index 000000000..d6a220f73
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-intodir.sh
@@ -0,0 +1,22 @@
+#!/bin/sh -ex
+
+# this tests fix for #1399
+mkdir foo
+mkdir foo/.snap/one
+touch bar
+mv bar foo
+sync
+# should not crash :)
+
+mkdir baz
+mkdir baz/.snap/two
+mv baz foo
+sync
+# should not crash :)
+
+# clean up.
+rmdir foo/baz/.snap/two
+rmdir foo/.snap/one
+rm -r foo
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh b/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh
new file mode 100755
index 000000000..5ebc852cf
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh
@@ -0,0 +1,42 @@
+#!/bin/sh -x
+
+set -e
+
+echo asdf > a
+mkdir .snap/1
+chmod 777 a
+mkdir .snap/2
+echo qwer > a
+mkdir .snap/3
+chmod 666 a
+mkdir .snap/4
+echo zxcv > a
+mkdir .snap/5
+
+ls -al .snap/?/a
+
+grep asdf .snap/1/a
+stat .snap/1/a | grep 'Size: 5'
+
+grep asdf .snap/2/a
+stat .snap/2/a | grep 'Size: 5'
+stat .snap/2/a | grep -- '-rwxrwxrwx'
+
+grep qwer .snap/3/a
+stat .snap/3/a | grep 'Size: 5'
+stat .snap/3/a | grep -- '-rwxrwxrwx'
+
+grep qwer .snap/4/a
+stat .snap/4/a | grep 'Size: 5'
+stat .snap/4/a | grep -- '-rw-rw-rw-'
+
+grep zxcv .snap/5/a
+stat .snap/5/a | grep 'Size: 5'
+stat .snap/5/a | grep -- '-rw-rw-rw-'
+
+rmdir .snap/[12345]
+
+echo "OK"
diff --git a/qa/workunits/fs/snaps/snaptest-name-limits.sh b/qa/workunits/fs/snaps/snaptest-name-limits.sh
new file mode 100755
index 000000000..f40d0231e
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-name-limits.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# This tests snapshot names limits: names have to be < 240 chars
+#
+
+function cleanup ()
+{
+ rmdir d1/.snap/*
+ rm -rf d1
+}
+
+function fail ()
+{
+ echo $@
+ cleanup
+ exit 1
+}
+
+mkdir d1
+
+longname=$(printf "%.241d" 2)
+mkdir d1/.snap/$longname 2> /dev/null
+[ -d d1/.snap/$longname ] && fail "Invalid snapshot exists: $longname"
+
+cleanup
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-parents.sh b/qa/workunits/fs/snaps/snaptest-parents.sh
new file mode 100755
index 000000000..7ab1ba7cf
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-parents.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+set -e
+
+echo "making directory tree and files"
+mkdir -p 1/a/b/c/
+echo "i'm file1" > 1/a/file1
+echo "i'm file2" > 1/a/b/file2
+echo "i'm file3" > 1/a/b/c/file3
+echo "snapshotting"
+mkdir 1/.snap/foosnap1
+mkdir 2
+echo "moving tree"
+mv 1/a 2
+echo "checking snapshot contains tree..."
+dir1=`find 1/.snap/foosnap1 | wc -w`
+dir2=`find 2/ | wc -w`
+#diff $dir1 $dir2 && echo "Success!"
+test $dir1==$dir2 && echo "Success!"
+echo "adding folder and file to tree..."
+mkdir 2/a/b/c/d
+echo "i'm file 4!" > 2/a/b/c/d/file4
+echo "snapshotting tree 2"
+mkdir 2/.snap/barsnap2
+echo "comparing snapshots"
+dir1=`find 1/.snap/foosnap1/ -maxdepth 2 | wc -w`
+dir2=`find 2/.snap/barsnap2/ -maxdepth 2 | wc -w`
+#diff $dir1 $dir2 && echo "Success!"
+test $dir1==$dir2 && echo "Success!"
+echo "moving subtree to first folder"
+mv 2/a/b/c 1
+echo "comparing snapshots and new tree"
+dir1=`find 1/c | wc -w`
+dir2=`find 2/.snap/barsnap2/a/b/c | wc -w`
+test "$dir1" = "$dir2" && echo "Success!"
+rmdir 1/.snap/*
+rmdir 2/.snap/*
+echo "OK"
diff --git a/qa/workunits/fs/snaps/snaptest-realm-split.sh b/qa/workunits/fs/snaps/snaptest-realm-split.sh
new file mode 100755
index 000000000..300cca21d
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-realm-split.sh
@@ -0,0 +1,31 @@
+#!/bin/sh -x
+
+set -e
+
+mkdir -p 1/a
+exec 3<> 1/a/file1
+
+echo -n a >&3
+
+mkdir 1/.snap/s1
+
+echo -n b >&3
+
+mkdir 2
+# create new snaprealm at dir a, file1's cap should be attached to the new snaprealm
+mv 1/a 2
+
+mkdir 2/.snap/s2
+
+echo -n c >&3
+
+exec 3>&-
+
+grep '^a$' 1/.snap/s1/a/file1
+grep '^ab$' 2/.snap/s2/a/file1
+grep '^abc$' 2/a/file1
+
+rmdir 1/.snap/s1
+rmdir 2/.snap/s2
+rm -rf 1 2
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-snap-rename.sh b/qa/workunits/fs/snaps/snaptest-snap-rename.sh
new file mode 100755
index 000000000..aa7325b92
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-snap-rename.sh
@@ -0,0 +1,33 @@
+#!/bin/sh -x
+
+expect_failure() {
+ if "$@"; then return 1; else return 0; fi
+}
+set -e
+
+mkdir -p d1/d2
+mkdir -p d1/d3
+mkdir d1/.snap/foo
+mkdir d1/d2/.snap/foo
+mkdir d1/d3/.snap/foo
+mkdir d1/d3/.snap/bar
+mv d1/d2/.snap/foo d1/d2/.snap/bar
+# snapshot name can't start with _
+expect_failure mv d1/d2/.snap/bar d1/d2/.snap/_bar
+# can't rename parent snapshot
+expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/foo
+expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/_foo_1
+# can't rename a snapshot to a different directory
+expect_failure mv d1/d2/.snap/bar d1/.snap/
+# can't overwrite existing snapshot
+expect_failure python3 -c "import os; os.rename('d1/d3/.snap/foo', 'd1/d3/.snap/bar')"
+# can't move a snapshot out of the snapdir
+expect_failure python3 -c "import os; os.rename('d1/.snap/foo', 'd1/foo')"
+
+rmdir d1/.snap/foo
+rmdir d1/d2/.snap/bar
+rmdir d1/d3/.snap/foo
+rmdir d1/d3/.snap/bar
+rm -rf d1
+
+echo OK
diff --git a/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh b/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh
new file mode 100755
index 000000000..88a0e8ae5
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh
@@ -0,0 +1,24 @@
+#!/bin/sh -x
+
+set -e
+
+file=linux-2.6.33.tar.bz2
+wget -q http://download.ceph.com/qa/$file
+
+real=`md5sum $file | awk '{print $1}'`
+
+for f in `seq 1 20`
+do
+ echo $f
+ cp $file a
+ mkdir .snap/s
+ rm a
+ cp .snap/s/a /tmp/a
+ cur=`md5sum /tmp/a | awk '{print $1}'`
+ if [ "$cur" != "$real" ]; then
+ echo "FAIL: bad match, /tmp/a $cur != real $real"
+ false
+ fi
+ rmdir .snap/s
+done
+rm $file
diff --git a/qa/workunits/fs/snaps/snaptest-upchildrealms.sh b/qa/workunits/fs/snaps/snaptest-upchildrealms.sh
new file mode 100755
index 000000000..4e531a966
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-upchildrealms.sh
@@ -0,0 +1,28 @@
+#!/bin/sh -x
+
+set -e
+
+#
+# verify that a snap update on a parent realm will induce
+# snap cap writeback for inodes child realms
+#
+
+mkdir a
+mkdir a/b
+mkdir a/.snap/a1
+mkdir a/b/.snap/b1
+echo asdf > a/b/foo
+mkdir a/.snap/a2
+# client _should_ have just queued a capsnap for writeback
+ln a/b/foo a/b/bar # make the server cow the inode
+
+echo "this should not hang..."
+cat a/b/.snap/_a2_*/foo
+echo "good, it did not hang."
+
+rmdir a/b/.snap/b1
+rmdir a/.snap/a1
+rmdir a/.snap/a2
+rm -r a
+
+echo "OK"
diff --git a/qa/workunits/fs/snaps/snaptest-xattrwb.sh b/qa/workunits/fs/snaps/snaptest-xattrwb.sh
new file mode 100755
index 000000000..e503aed77
--- /dev/null
+++ b/qa/workunits/fs/snaps/snaptest-xattrwb.sh
@@ -0,0 +1,29 @@
+#!/bin/sh -x
+
+set -e
+
+echo "testing simple xattr wb"
+touch x
+setfattr -n user.foo x
+mkdir .snap/s1
+getfattr -n user.foo .snap/s1/x | grep user.foo
+rm x
+rmdir .snap/s1
+
+echo "testing wb with pre-wb server cow"
+mkdir a
+mkdir a/b
+mkdir a/b/c
+# b now has As but not Ax
+setfattr -n user.foo a/b
+mkdir a/.snap/s
+mkdir a/b/cc
+# b now has been cowed on the server, but we still have dirty xattr caps
+getfattr -n user.foo a/b # there they are...
+getfattr -n user.foo a/.snap/s/b | grep user.foo # should be there, too!
+
+# ok, clean up
+rmdir a/.snap/s
+rm -r a
+
+echo OK
diff --git a/qa/workunits/fs/snaps/untar_snap_rm.sh b/qa/workunits/fs/snaps/untar_snap_rm.sh
new file mode 100755
index 000000000..8a8412e66
--- /dev/null
+++ b/qa/workunits/fs/snaps/untar_snap_rm.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+do_tarball() {
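+ # extract a tarball, snapshot the tree, delete the live copy, then
+ # restore it from the snapshot to exercise snapshot reads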
+ wget http://download.ceph.com/qa/$1
+ tar xvf$2 $1
+ mkdir .snap/k
+ sync
+ rm -rv $3
+ cp -av .snap/k .
+ rmdir .snap/k
+ rm -rv k
+ rm $1
+}
+
+do_tarball coreutils_8.5.orig.tar.gz z coreutils-8.5
+do_tarball linux-2.6.33.tar.bz2 j linux-2.6.33
diff --git a/qa/workunits/fs/test_o_trunc.c b/qa/workunits/fs/test_o_trunc.c
new file mode 100644
index 000000000..1ce19e4bb
--- /dev/null
+++ b/qa/workunits/fs/test_o_trunc.c
@@ -0,0 +1,45 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[])
+{
+ char obuf[32], ibuf[1024];
+ int n, max = 0;
+
+ if (argc > 2)
+ max = atoi(argv[2]);
+ if (!max)
+ max = 600;
+
+ memset(obuf, 0xff, sizeof(obuf));
+
+ for (n = 1; n <= max; ++n) {
+ int fd, ret;
+ fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
+ printf("%d/%d: open fd = %d\n", n, max, fd);
+
+ ret = write(fd, obuf, sizeof(obuf));
+ printf("write ret = %d\n", ret);
+
+ sleep(1);
+
+ ret = write(fd, obuf, sizeof(obuf));
+ printf("write ret = %d\n", ret);
+
+ ret = pread(fd, ibuf, sizeof(ibuf), 0);
+ printf("pread ret = %d\n", ret);
+
+ if (memcmp(obuf, ibuf, sizeof(obuf))) {
+ printf("mismatch\n");
+ close(fd);
+ break;
+ }
+ close(fd);
+ }
+ return 0;
+}
diff --git a/qa/workunits/fs/test_o_trunc.sh b/qa/workunits/fs/test_o_trunc.sh
new file mode 100755
index 000000000..90a72600d
--- /dev/null
+++ b/qa/workunits/fs/test_o_trunc.sh
@@ -0,0 +1,7 @@
+#!/bin/sh -ex
+
+mydir=`dirname $0`
+$mydir/test_o_trunc trunc.foo 600
+
+echo OK
+
diff --git a/qa/workunits/fs/test_python.sh b/qa/workunits/fs/test_python.sh
new file mode 100755
index 000000000..6e39b95a4
--- /dev/null
+++ b/qa/workunits/fs/test_python.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -ex
+
+# Running as root because the filesystem root directory will be
+# owned by uid 0, and that's where we're writing.
+sudo python3 -m pytest -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py
+exit 0
diff --git a/qa/workunits/hadoop/repl.sh b/qa/workunits/hadoop/repl.sh
new file mode 100755
index 000000000..84f6150ab
--- /dev/null
+++ b/qa/workunits/hadoop/repl.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+# bail if $TESTDIR is not set as this test will fail in that scenario
+[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
+
+# if HADOOP_PREFIX is not set, use default
+[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
+
+# create pools with different replication factors
+for repl in 2 3 7 8 9; do
+ name=hadoop.$repl
+ ceph osd pool create $name 8 8
+ ceph osd pool set $name size $repl
+
+ id=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"`
+ ceph fs add_data_pool cephfs $id
+done
+
+# create a file in each of the pools
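+# (the ceph.data.pools option of the Hadoop CephFS plugin pins a file's data
+# to the named pool, so the file inherits that pool's replication factor)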
+for repl in 2 3 7 8 9; do
+ name=hadoop.$repl
+ $HADOOP_PREFIX/bin/hadoop fs -rm -f /$name.dat
+ dd if=/dev/zero bs=1048576 count=1 | \
+ $HADOOP_PREFIX/bin/hadoop fs -Dceph.data.pools="$name" \
+ -put - /$name.dat
+done
+
+# check that hadoop reports replication matching
+# that of the pool the file was written into
+for repl in 2 3 7 8 9; do
+ name=hadoop.$repl
+ repl2=$($HADOOP_PREFIX/bin/hadoop fs -ls /$name.dat | awk '{print $2}')
+ if [ $repl -ne $repl2 ]; then
+ echo "replication factors didn't match!"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/qa/workunits/hadoop/terasort.sh b/qa/workunits/hadoop/terasort.sh
new file mode 100755
index 000000000..3d6988a21
--- /dev/null
+++ b/qa/workunits/hadoop/terasort.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+INPUT=/terasort-input
+OUTPUT=/terasort-output
+REPORT=/terasort-report
+
+num_records=100000
+[ ! -z $NUM_RECORDS ] && num_records=$NUM_RECORDS
+
+# bail if $TESTDIR is not set as this test will fail in that scenario
+[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
+
+# if HADOOP_PREFIX is not set, use default
+[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
+
+# Nuke hadoop directories
+$HADOOP_PREFIX/bin/hadoop fs -rm -r $INPUT $OUTPUT $REPORT || true
+
+# Generate terasort data
+#
+#-Ddfs.blocksize=512M \
+#-Dio.file.buffer.size=131072 \
+#-Dmapreduce.map.java.opts=-Xmx1536m \
+#-Dmapreduce.map.memory.mb=2048 \
+#-Dmapreduce.task.io.sort.mb=256 \
+#-Dyarn.app.mapreduce.am.resource.mb=1024 \
+#-Dmapred.map.tasks=64 \
+$HADOOP_PREFIX/bin/hadoop jar \
+ $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
+ teragen \
+ -Dmapred.map.tasks=9 \
+ $num_records \
+ $INPUT
+
+# Run the sort job
+#
+#-Ddfs.blocksize=512M \
+#-Dio.file.buffer.size=131072 \
+#-Dmapreduce.map.java.opts=-Xmx1536m \
+#-Dmapreduce.map.memory.mb=2048 \
+#-Dmapreduce.map.output.compress=true \
+#-Dmapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.Lz4Codec \
+#-Dmapreduce.reduce.java.opts=-Xmx1536m \
+#-Dmapreduce.reduce.memory.mb=2048 \
+#-Dmapreduce.task.io.sort.factor=100 \
+#-Dmapreduce.task.io.sort.mb=768 \
+#-Dyarn.app.mapreduce.am.resource.mb=1024 \
+#-Dmapred.reduce.tasks=100 \
+#-Dmapreduce.terasort.output.replication=1 \
+$HADOOP_PREFIX/bin/hadoop jar \
+ $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
+ terasort \
+ -Dmapred.reduce.tasks=10 \
+ $INPUT $OUTPUT
+
+# Validate the sorted data
+#
+#-Ddfs.blocksize=512M \
+#-Dio.file.buffer.size=131072 \
+#-Dmapreduce.map.java.opts=-Xmx1536m \
+#-Dmapreduce.map.memory.mb=2048 \
+#-Dmapreduce.reduce.java.opts=-Xmx1536m \
+#-Dmapreduce.reduce.memory.mb=2048 \
+#-Dmapreduce.task.io.sort.mb=256 \
+#-Dyarn.app.mapreduce.am.resource.mb=1024 \
+#-Dmapred.reduce.tasks=1 \
+$HADOOP_PREFIX/bin/hadoop jar \
+ $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
+ teravalidate \
+ -Dmapred.reduce.tasks=1 \
+ $OUTPUT $REPORT
+
+exit 0
diff --git a/qa/workunits/hadoop/wordcount.sh b/qa/workunits/hadoop/wordcount.sh
new file mode 100755
index 000000000..616b08af2
--- /dev/null
+++ b/qa/workunits/hadoop/wordcount.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+WC_INPUT=/wc_input
+WC_OUTPUT=/wc_output
+DATA_INPUT=$(mktemp -d)
+
+echo "starting hadoop-wordcount test"
+
+# bail if $TESTDIR is not set as this test will fail in that scenario
+[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
+
+# if HADOOP_PREFIX is not set, use default
+[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
+
+# Nuke hadoop directories
+$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
+
+# Fetch and import testing data set
+curl http://download.ceph.com/qa/hadoop_input_files.tar | tar xf - -C $DATA_INPUT
+$HADOOP_PREFIX/bin/hadoop fs -copyFromLocal $DATA_INPUT $WC_INPUT
+rm -rf $DATA_INPUT
+
+# Run the job
+$HADOOP_PREFIX/bin/hadoop jar \
+ $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
+ wordcount $WC_INPUT $WC_OUTPUT
+
+# Cleanup
+$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
+
+echo "completed hadoop-wordcount test"
+exit 0
diff --git a/qa/workunits/kernel_untar_build.sh b/qa/workunits/kernel_untar_build.sh
new file mode 100755
index 000000000..9b60f065c
--- /dev/null
+++ b/qa/workunits/kernel_untar_build.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -e
+
+wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
+
+mkdir t
+cd t
+tar xzf ../linux.tar.gz
+cd linux*
+make defconfig
+make -j`grep -c processor /proc/cpuinfo`
+cd ..
+if ! rm -rv linux* ; then
+ echo "uh oh rm -r failed, it left behind:"
+ find .
+ exit 1
+fi
+cd ..
+rm -rv t linux*
diff --git a/qa/workunits/libcephfs/test.sh b/qa/workunits/libcephfs/test.sh
new file mode 100755
index 000000000..c53fe893c
--- /dev/null
+++ b/qa/workunits/libcephfs/test.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -e
+
+ceph_test_libcephfs
+ceph_test_libcephfs_access
+ceph_test_libcephfs_reclaim
+ceph_test_libcephfs_lazyio
+ceph_test_libcephfs_newops
+ceph_test_libcephfs_suidsgid
+
+exit 0
diff --git a/qa/workunits/mgr/test_localpool.sh b/qa/workunits/mgr/test_localpool.sh
new file mode 100755
index 000000000..40a749e8d
--- /dev/null
+++ b/qa/workunits/mgr/test_localpool.sh
@@ -0,0 +1,21 @@
+#!/bin/sh -ex
+
+ceph config set mgr mgr/localpool/subtree host
+ceph config set mgr mgr/localpool/failure_domain osd
+ceph mgr module enable localpool
+
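+# the localpool module creates one "by-host-" pool per host subtree; poll
+# until the first one shows up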
+while ! ceph osd pool ls | grep '^by-host-'
+do
+ sleep 5
+done
+
+ceph mgr module disable localpool
+for p in `ceph osd pool ls | grep '^by-host-'`
+do
+ ceph osd pool rm $p $p --yes-i-really-really-mean-it
+done
+
+ceph config rm mgr mgr/localpool/subtree
+ceph config rm mgr mgr/localpool/failure_domain
+
+echo OK
diff --git a/qa/workunits/mgr/test_per_module_finisher.sh b/qa/workunits/mgr/test_per_module_finisher.sh
new file mode 100755
index 000000000..dc66bce23
--- /dev/null
+++ b/qa/workunits/mgr/test_per_module_finisher.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+set -ex
+
+# This testcase tests the per module finisher stats for enabled modules
+# using check counter (qa/tasks/check_counter.py).
+
+# 'balancer' commands
+ceph balancer pool ls
+
+# 'crash' commands
+ceph crash ls
+ceph crash ls-new
+
+# 'device' commands
+ceph device query-daemon-health-metrics mon.a
+
+# 'iostat' command
+ceph iostat &
+pid=$!
+sleep 3
+kill -SIGTERM $pid
+
+# 'pg_autoscaler' command
+ceph osd pool autoscale-status
+
+# 'progress' command
+ceph progress
+ceph progress json
+
+# 'status' commands
+ceph fs status
+ceph osd status
+
+# 'telemetry' commands
+ceph telemetry status
+ceph telemetry diff
+
+echo OK
diff --git a/qa/workunits/mon/auth_caps.sh b/qa/workunits/mon/auth_caps.sh
new file mode 100755
index 000000000..1f59ae1f7
--- /dev/null
+++ b/qa/workunits/mon/auth_caps.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+declare -A keymap
+
+combinations="r w x rw rx wx rwx"
+
+for i in ${combinations}; do
+ k="foo_$i"
+ k=`ceph auth get-or-create-key client.$i mon "allow $i"` || exit 1
+ keymap["$i"]=$k
+done
+
+# add special caps
+keymap["all"]=`ceph auth get-or-create-key client.all mon 'allow *'` || exit 1
+
+tmp=`mktemp`
+ceph auth export > $tmp
+
+trap "rm $tmp" INT ERR EXIT QUIT 0
+
+expect() {
+
+ set +e
+
+ local expected_ret=$1
+ local ret
+
+ shift
+ cmd=$@
+
+ eval $cmd
+ ret=$?
+
+ set -e
+
+ if [[ $ret -ne $expected_ret ]]; then
+ echo "ERROR: running \'$cmd\': expected $expected_ret got $ret"
+ return 1
+ fi
+
+ return 0
+}
+
+read_ops() {
+ local caps=$1
+ local has_read=1 has_exec=1
+ local ret
+ local args
+
+ ( echo $caps | grep 'r' ) || has_read=0
+ ( echo $caps | grep 'x' ) || has_exec=0
+
+ if [[ "$caps" == "all" ]]; then
+ has_read=1
+ has_exec=1
+ fi
+
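+ # default to EACCES (13); only keys with both read and execute caps succeed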
+ ret=13
+ if [[ $has_read -gt 0 && $has_exec -gt 0 ]]; then
+ ret=0
+ fi
+
+ args="--id $caps --key ${keymap[$caps]}"
+
+ expect $ret ceph auth get client.admin $args
+ expect $ret ceph auth get-key client.admin $args
+ expect $ret ceph auth export $args
+ expect $ret ceph auth export client.admin $args
+ expect $ret ceph auth ls $args
+ expect $ret ceph auth print-key client.admin $args
+ expect $ret ceph auth print_key client.admin $args
+}
+
+write_ops() {
+
+ local caps=$1
+ local has_read=1 has_write=1 has_exec=1
+ local ret
+ local args
+
+ ( echo $caps | grep 'r' ) || has_read=0
+ ( echo $caps | grep 'w' ) || has_write=0
+ ( echo $caps | grep 'x' ) || has_exec=0
+
+ if [[ "$caps" == "all" ]]; then
+ has_read=1
+ has_write=1
+ has_exec=1
+ fi
+
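+ # default to EACCES (13); only keys with read, write and execute caps succeed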
+ ret=13
+ if [[ $has_read -gt 0 && $has_write -gt 0 && $has_exec -gt 0 ]]; then
+ ret=0
+ fi
+
+ args="--id $caps --key ${keymap[$caps]}"
+
+ expect $ret ceph auth add client.foo $args
+ expect $ret "ceph auth caps client.foo mon 'allow *' $args"
+ expect $ret ceph auth get-or-create client.admin $args
+ expect $ret ceph auth get-or-create-key client.admin $args
+ expect $ret ceph auth get-or-create-key client.baz $args
+ expect $ret ceph auth del client.foo $args
+ expect $ret ceph auth del client.baz $args
+ expect $ret ceph auth import -i $tmp $args
+}
+
+echo "running combinations: ${!keymap[@]}"
+
+subcmd=$1
+
+for i in ${!keymap[@]}; do
+ echo "caps: $i"
+ if [[ -z "$subcmd" || "$subcmd" == "read" || "$subcmd" == "all" ]]; then
+ read_ops $i
+ fi
+
+ if [[ -z "$subcmd" || "$subcmd" == "write" || "$subcmd" == "all" ]]; then
+ write_ops $i
+ fi
+done
+
+# cleanup
+for i in ${combinations} all; do
+ ceph auth del client.$i || exit 1
+done
+
+echo "OK"
diff --git a/qa/workunits/mon/auth_key_rotation.sh b/qa/workunits/mon/auth_key_rotation.sh
new file mode 100755
index 000000000..1a53bab6d
--- /dev/null
+++ b/qa/workunits/mon/auth_key_rotation.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/bash -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+
+ceph auth export
+ceph auth rm client.rot
+
+ceph auth get-or-create client.rot mon 'allow rwx'
+ceph auth export client.rot | grep key
+ceph auth export client.rot | expect_false grep pending.key
+
+ceph auth get-or-create-pending client.rot
+ceph auth export client.rot | grep key
+ceph auth export client.rot | grep pending.key
+
+ceph auth clear-pending client.rot
+ceph auth export client.rot | expect_false grep pending.key
+
+ceph auth get-or-create-pending client.rot
+ceph auth export client.rot | grep key
+ceph auth export client.rot | grep pending.key
+K=$(ceph auth export client.rot | grep 'key = ' | head -n 1 | awk '{print $3}')
+PK=$(ceph auth export client.rot | grep pending.key | awk '{print $4}')
+echo "K is $K"
+echo "PK is $PK"
+ceph -n client.rot --key $K -s
+
+ceph auth commit-pending client.rot
+ceph auth export client.rot | expect_false grep pending.key
+ceph auth export client.rot | grep key | grep $PK
+
+ceph auth get-or-create-pending client.rot
+ceph auth export client.rot | grep key
+ceph auth export client.rot | grep pending.key
+K=$(ceph auth export client.rot | grep 'key = ' | head -n 1 | awk '{print $3}')
+PK=$(ceph auth export client.rot | grep pending.key | awk '{print $4}')
+echo "2, K is $K"
+echo "2, PK is $PK"
+
+ceph auth export client.rot
+
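+# the old key keeps working until the pending key is committed; using the
+# pending key triggers the commit, so poll until the old key is rejected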
+while ceph -n client.rot --key $K -s ; do
+ ceph auth export client.rot
+ ceph -n client.rot --key $PK -s
+ sleep 1
+done
+
+ceph auth export client.rot | expect_false grep pending.key
+ceph auth export client.rot | grep key | grep $PK
+
+ceph -n client.rot --key $PK -s
+
+echo ok
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
new file mode 100644
index 000000000..26c0cd14c
--- /dev/null
+++ b/qa/workunits/mon/caps.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+
+import subprocess
+import shlex
+import errno
+import sys
+import os
+import io
+import re
+
+from ceph_argparse import * # noqa
+
+keyring_base = '/tmp/cephtest-caps.keyring'
+
+class UnexpectedReturn(Exception):
+ def __init__(self, cmd, ret, expected, msg):
+ if isinstance(cmd, list):
+ self.cmd = ' '.join(cmd)
+ else:
+ assert isinstance(cmd, str), 'cmd needs to be either a list or a str'
+ self.cmd = cmd
+ self.cmd = str(self.cmd)
+ self.ret = int(ret)
+ self.expected = int(expected)
+ self.msg = str(msg)
+
+ def __str__(self):
+ return repr('{c}: expected return {e}, got {r} ({o})'.format(
+ c=self.cmd, e=self.expected, r=self.ret, o=self.msg))
+
+def call(cmd):
+ if isinstance(cmd, list):
+ args = cmd
+ elif isinstance(cmd, str):
+ args = shlex.split(cmd)
+ else:
+ assert False, 'cmd is not a string/unicode nor a list!'
+
+ print('call: {0}'.format(args))
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ ret = proc.wait()
+
+ return (ret, proc)
+
+def expect(cmd, expected_ret):
+
+ try:
+ (r, p) = call(cmd)
+ except ValueError as e:
+ print('unable to run {c}: {err}'.format(c=repr(cmd), err=e),
+ file=sys.stderr)
+ return errno.EINVAL
+
+ assert r == p.returncode, \
+ 'wth? r was supposed to match returncode!'
+
+ if r != expected_ret:
+ raise UnexpectedReturn(repr(cmd), r, expected_ret, str(p.stderr.read()))
+
+ return p
+
+def expect_to_file(cmd, expected_ret, out_file):
+
+ # Let the exception be propagated to the caller
+ p = expect(cmd, expected_ret)
+ assert p.returncode == expected_ret, \
+ 'expected result doesn\'t match and no exception was thrown!'
+
+ with io.open(out_file, 'ab') as file:
+ file.write(p.stdout.read())
+
+ return p
+
+class Command:
+ def __init__(self, cid, j):
+ self.cid = cid[3:]
+ self.perms = j['perm']
+ self.module = j['module']
+
+ self.sig = ''
+ self.args = []
+ for s in j['sig']:
+ if not isinstance(s, dict):
+ assert isinstance(s, str), \
+ 'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
+ if len(self.sig) > 0:
+ self.sig += ' '
+ self.sig += s
+ else:
+ self.args.append(s)
+
+ def __str__(self):
+ return repr('command {0}: {1} (requires \'{2}\')'.format(self.cid,\
+ self.sig, self.perms))
+
+
+def destroy_keyring(path):
+ if not os.path.exists(path):
+ raise Exception('oops! cannot remove nonexistent keyring {0}'.format(path))
+
+ # grab all client entities from the keyring
+ entities = [m.group(1) for m in [re.match(r'\[client\.(.*)\]', l)
+ for l in [str(line.strip())
+ for line in io.open(path,'r')]] if m is not None]
+
+ # clean up and make sure each entity is gone
+ for e in entities:
+ expect('ceph auth del client.{0}'.format(e), 0)
+ expect('ceph auth get client.{0}'.format(e), errno.ENOENT)
+
+ # remove keyring
+ os.unlink(path)
+
+ return True
+
+def test_basic_auth():
+ # make sure we can successfully add/del entities, change their caps
+ # and import/export keyrings.
+
+ expect('ceph auth add client.basicauth', 0)
+ expect('ceph auth caps client.basicauth mon \'allow *\'', 0)
+ # entity exists and caps do not match
+ expect('ceph auth add client.basicauth', errno.EINVAL)
+ # this command attempts to change an existing state and will fail
+ expect('ceph auth add client.basicauth mon \'allow w\'', errno.EINVAL)
+ expect('ceph auth get-or-create client.basicauth', 0)
+ expect('ceph auth get-key client.basicauth', 0)
+ expect('ceph auth get-or-create client.basicauth2', 0)
+ # cleanup
+ expect('ceph auth del client.basicauth', 0)
+ expect('ceph auth del client.basicauth2', 0)
+
+ return True
+
+def gen_module_keyring(module):
+ module_caps = [
+ ('all', '{t} \'allow service {s} rwx\'', 0),
+ ('none', '', errno.EACCES),
+ ('wrong', '{t} \'allow service foobar rwx\'', errno.EACCES),
+ ('right', '{t} \'allow service {s} {p}\'', 0),
+ ('no-execute', '{t} \'allow service {s} x\'', errno.EACCES)
+ ]
+
+ keyring = '{0}.service-{1}'.format(keyring_base,module)
+ for perms in 'r rw x'.split():
+ for (n,p,r) in module_caps:
+ c = p.format(t='mon', s=module, p=perms)
+ expect_to_file(
+ 'ceph auth get-or-create client.{cn}-{cp} {caps}'.format(
+ cn=n,cp=perms,caps=c), 0, keyring)
+
+ return keyring
+
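+# gen_module_keyring() emits entities such as client.all-r or
+# client.right-rw and appends their keys to a per-module keyring file,
+# pairing each cap variant with the return code expected when it is
+# exercised.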
+
+def test_all():
+
+ perms = {
+ 'good': {
+ 'broad':[
+ ('rwx', 'allow *'),
+ ('r', 'allow r'),
+ ('rw', 'allow rw'),
+ ('x', 'allow x'),
+ ],
+ 'service':[
+ ('rwx', 'allow service {s} rwx'),
+ ('r', 'allow service {s} r'),
+ ('rw', 'allow service {s} rw'),
+ ('x', 'allow service {s} x'),
+ ],
+ 'command':[
+ ('rwx', 'allow command "{c}"'),
+ ],
+ 'command-with':[
+ ('rwx', 'allow command "{c}" with {kv}')
+ ],
+ 'command-with-prefix':[
+ ('rwx', 'allow command "{c}" with {key} prefix {val}')
+ ]
+ },
+ 'bad': {
+ 'broad':[
+ ('none', ''),
+ ],
+ 'service':[
+ ('none1', 'allow service foo rwx'),
+ ('none2', 'allow service foo r'),
+ ('none3', 'allow service foo rw'),
+ ('none4', 'allow service foo x'),
+ ],
+ 'command':[
+ ('none', 'allow command foo'),
+ ],
+ 'command-with':[
+ ('none', 'allow command "{c}" with foo=bar'),
+ ],
+ 'command-with-prefix':[
+ ('none', 'allow command "{c}" with foo prefix bar'),
+ ],
+ }
+ }
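+ # The cap strings above follow the mon cap grammar this test exercises:
+ # 'allow <perms>', 'allow service <name> <perms>', and
+ # 'allow command "<cmd>" [with <key>=<val> | with <key> prefix <val>]'.
+ # 'good' caps should authorize the matching command; 'bad' ones should
+ # be denied with EACCES.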
+
+ cmds = {
+ '':[
+ {
+ 'cmd':('status', '', 'r')
+ },
+ {
+ 'pre':'heap start_profiler',
+ 'cmd':('heap', 'heapcmd=stats', 'rw'),
+ 'post':'heap stop_profiler'
+ }
+ ],
+ 'auth':[
+ {
+ 'pre':'',
+ 'cmd':('auth ls', '', 'r'),
+ 'post':''
+ },
+ {
+ 'pre':'auth get-or-create client.foo mon \'allow *\'',
+ 'cmd':('auth caps', 'entity="client.foo"', 'rw'),
+ 'post':'auth del client.foo'
+ }
+ ],
+ 'pg':[
+ {
+ 'cmd':('pg getmap', '', 'r'),
+ },
+ ],
+ 'mds':[
+ {
+ 'cmd':('mds getmap', '', 'r'),
+ },
+ ],
+ 'mon':[
+ {
+ 'cmd':('mon getmap', '', 'r')
+ },
+ {
+ 'cmd':('mon remove', 'name=a', 'rw')
+ }
+ ],
+ 'osd':[
+ {
+ 'cmd':('osd getmap', '', 'r'),
+ },
+ {
+ 'cmd':('osd pause', '', 'rw'),
+ 'post':'osd unpause'
+ },
+ {
+ 'cmd':('osd crush dump', '', 'r')
+ },
+ ],
+ 'config-key':[
+ {
+ 'pre':'config-key set foo bar',
+ 'cmd':('config-key get', 'key=foo', 'r')
+ },
+ {
+ 'pre':'config-key set foo bar',
+ 'cmd':('config-key del', 'key=foo', 'rw')
+ }
+ ]
+ }
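+ # Each entry above maps a service module to its test cases: 'cmd' is a
+ # (command, args, required-perms) triple, and the optional 'pre'/'post'
+ # commands set up and tear down state around the permission check.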
+
+ for (module,cmd_lst) in cmds.items():
+ k = keyring_base + '.' + module
+ for cmd in cmd_lst:
+
+ (cmd_cmd, cmd_args, cmd_perm) = cmd['cmd']
+ cmd_args_key = ''
+ cmd_args_val = ''
+ if len(cmd_args) > 0:
+ (cmd_args_key, cmd_args_val) = cmd_args.split('=')
+
+ print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
+ # gen keyring
+ for (good_or_bad,kind_map) in perms.items():
+ for (kind,lst) in kind_map.items():
+ for (perm, cap) in lst:
+ cap_formatted = cap.format(
+ s=module,
+ c=cmd_cmd,
+ kv=cmd_args,
+ key=cmd_args_key,
+ val=cmd_args_val)
+
+ if len(cap_formatted) == 0:
+ run_cap = ''
+ else:
+ run_cap = 'mon \'{fc}\''.format(fc=cap_formatted)
+
+ cname = 'client.{gb}-{kind}-{p}'.format(
+ gb=good_or_bad,kind=kind,p=perm)
+ expect_to_file(
+ 'ceph auth get-or-create {n} {c}'.format(
+ n=cname,c=run_cap), 0, k)
+ # keyring generated
+ print('testing {m}/{c}'.format(m=module,c=cmd_cmd))
+
+ # test
+ for good_bad in perms.keys():
+ for (kind,lst) in perms[good_bad].items():
+ for (perm,_) in lst:
+ cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
+
+ if good_bad == 'good':
+ expect_ret = 0
+ else:
+ expect_ret = errno.EACCES
+
+ if cmd_perm not in perm:
+ expect_ret = errno.EACCES
+ if 'with' in kind and len(cmd_args) == 0:
+ expect_ret = errno.EACCES
+ if 'service' in kind and len(module) == 0:
+ expect_ret = errno.EACCES
+
+ if 'pre' in cmd and len(cmd['pre']) > 0:
+ expect('ceph {0}'.format(cmd['pre']), 0)
+ expect('ceph -n {cn} -k {k} {c} {arg_val}'.format(
+ cn=cname,k=k,c=cmd_cmd,arg_val=cmd_args_val), expect_ret)
+ if 'post' in cmd and len(cmd['post']) > 0:
+ expect('ceph {0}'.format(cmd['post']), 0)
+ # finish testing
+ destroy_keyring(k)
+
+
+ return True
+
+
+def test_misc():
+
+ k = keyring_base + '.misc'
+ expect_to_file(
+ 'ceph auth get-or-create client.caps mon \'allow command "auth caps"' \
+ ' with entity="client.caps"\'', 0, k)
+ expect('ceph -n client.caps -k {kf} quorum_status'.format(kf=k), errno.EACCES)
+ expect('ceph -n client.caps -k {kf} auth caps client.caps mon \'allow *\''.format(kf=k), 0)
+ expect('ceph -n client.caps -k {kf} quorum_status'.format(kf=k), 0)
+ destroy_keyring(k)
+
+def main():
+
+ test_basic_auth()
+ test_all()
+ test_misc()
+
+ print('OK')
+
+ return 0
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/mon/caps.sh b/qa/workunits/mon/caps.sh
new file mode 100755
index 000000000..eae5d8665
--- /dev/null
+++ b/qa/workunits/mon/caps.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+set -x
+
+tmp=/tmp/cephtest-mon-caps-madness
+
+exit_on_error=1
+
+[[ ! -z $TEST_EXIT_ON_ERROR ]] && exit_on_error=$TEST_EXIT_ON_ERROR
+
+if [ `uname` = FreeBSD ]; then
+ ETIMEDOUT=60
+else
+ ETIMEDOUT=110
+fi
+
+expect()
+{
+ cmd=$1
+ expected_ret=$2
+
+ echo $cmd
+ eval $cmd >&/dev/null
+ ret=$?
+
+ if [[ $ret -ne $expected_ret ]]; then
+ echo "Error: Expected return $expected_ret, got $ret"
+ [[ $exit_on_error -eq 1 ]] && exit 1
+ return 1
+ fi
+
+ return 0
+}
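+
+# Usage: expect <command> <expected exit code>. The expected value 13 used
+# below is EACCES, the status returned when the monitor denies a command
+# for lack of caps.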
+
+expect "ceph auth get-or-create client.bazar > $tmp.bazar.keyring" 0
+expect "ceph -k $tmp.bazar.keyring --user bazar quorum_status" 13
+ceph auth del client.bazar
+
+c="'allow command \"auth ls\", allow command quorum_status'"
+expect "ceph auth get-or-create client.foo mon $c > $tmp.foo.keyring" 0
+expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 0
+expect "ceph -k $tmp.foo.keyring --user foo auth ls" 0
+expect "ceph -k $tmp.foo.keyring --user foo auth export" 13
+expect "ceph -k $tmp.foo.keyring --user foo auth del client.bazar" 13
+expect "ceph -k $tmp.foo.keyring --user foo osd dump" 13
+
+# The monitor drops a client's subscribe message if the client lacks read
+# caps on the mon. Such a client waits for the mgrmap in vain when told to
+# send a command to the mgr. "pg dump" is served by the mgr, so we need a
+# timeout to test this scenario.
+#
+# leave plenty of time here because the mons might be thrashing.
+export CEPH_ARGS='--rados-mon-op-timeout=300'
+expect "ceph -k $tmp.foo.keyring --user foo pg dump" $ETIMEDOUT
+export CEPH_ARGS=''
+
+ceph auth del client.foo
+expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 13
+
+c="'allow command service with prefix=list, allow command quorum_status'"
+expect "ceph auth get-or-create client.bar mon $c > $tmp.bar.keyring" 0
+expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 0
+expect "ceph -k $tmp.bar.keyring --user bar auth ls" 13
+expect "ceph -k $tmp.bar.keyring --user bar auth export" 13
+expect "ceph -k $tmp.bar.keyring --user bar auth del client.foo" 13
+expect "ceph -k $tmp.bar.keyring --user bar osd dump" 13
+
+# again, we'll need to timeout.
+export CEPH_ARGS='--rados-mon-op-timeout=300'
+expect "ceph -k $tmp.bar.keyring --user bar pg dump" $ETIMEDOUT
+export CEPH_ARGS=''
+
+ceph auth del client.bar
+expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 13
+
+rm $tmp.bazar.keyring $tmp.foo.keyring $tmp.bar.keyring
+
+# invalid caps health warning
+cat <<EOF | ceph auth import -i -
+[client.bad]
+ caps mon = this is wrong
+ caps osd = does not parse
+ caps mds = also does not parse
+EOF
+ceph health | grep AUTH_BAD_CAP
+ceph health detail | grep client.bad
+ceph auth rm client.bad
+expect "ceph auth health | grep AUTH_BAD_CAP" 1
+
+echo OK
diff --git a/qa/workunits/mon/config.sh b/qa/workunits/mon/config.sh
new file mode 100755
index 000000000..1b00201ae
--- /dev/null
+++ b/qa/workunits/mon/config.sh
@@ -0,0 +1,136 @@
+#!/bin/bash -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+ceph config dump
+
+# value validation
+ceph config set mon.a debug_asok 22
+ceph config set mon.a debug_asok 22/33
+ceph config get mon.a debug_asok | grep 22
+ceph config set mon.a debug_asok 1/2
+expect_false ceph config set mon.a debug_asok foo
+expect_false ceph config set mon.a debug_asok -10
+ceph config rm mon.a debug_asok
+
+ceph config set global log_graylog_port 123
+expect_false ceph config set global log_graylog_port asdf
+ceph config rm global log_graylog_port
+
+ceph config set mon mon_cluster_log_to_stderr true
+ceph config get mon.a mon_cluster_log_to_stderr | grep true
+ceph config set mon mon_cluster_log_to_stderr 2
+ceph config get mon.a mon_cluster_log_to_stderr | grep true
+ceph config set mon mon_cluster_log_to_stderr 1
+ceph config get mon.a mon_cluster_log_to_stderr | grep true
+ceph config set mon mon_cluster_log_to_stderr false
+ceph config get mon.a mon_cluster_log_to_stderr | grep false
+ceph config set mon mon_cluster_log_to_stderr 0
+ceph config get mon.a mon_cluster_log_to_stderr | grep false
+expect_false ceph config set mon mon_cluster_log_to_stderr fiddle
+expect_false ceph config set mon mon_cluster_log_to_stderr ''
+ceph config rm mon mon_cluster_log_to_stderr
+
+expect_false ceph config set mon.a osd_pool_default_type foo
+ceph config set mon.a osd_pool_default_type replicated
+ceph config rm mon.a osd_pool_default_type
+
+# scoping
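+# (precedence: a daemon-specific setting such as mon.a overrides the
+# daemon-type setting mon, which overrides global, as the sequence below
+# demonstrates)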
+ceph config set global debug_asok 33
+ceph config get mon.a debug_asok | grep 33
+ceph config set mon debug_asok 11
+ceph config get mon.a debug_asok | grep 11
+ceph config set mon.a debug_asok 22
+ceph config get mon.a debug_asok | grep 22
+ceph config rm mon.a debug_asok
+ceph config get mon.a debug_asok | grep 11
+ceph config rm mon debug_asok
+ceph config get mon.a debug_asok | grep 33
+# nested .-prefix scoping
+ceph config set client.foo debug_asok 44
+ceph config get client.foo.bar debug_asok | grep 44
+ceph config get client.foo.bar.baz debug_asok | grep 44
+ceph config set client.foo.bar debug_asok 55
+ceph config get client.foo.bar.baz debug_asok | grep 55
+ceph config rm client.foo debug_asok
+ceph config get client.foo.bar.baz debug_asok | grep 55
+ceph config rm client.foo.bar debug_asok
+ceph config get client.foo.bar.baz debug_asok | grep 33
+ceph config rm global debug_asok
+
+# whitespace keys
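+# (option names spelled with spaces and with underscores refer to the same
+# setting, so either form can be set, read, or removed)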
+ceph config set client.foo 'debug asok' 44
+ceph config get client.foo 'debug asok' | grep 44
+ceph config set client.foo debug_asok 55
+ceph config get client.foo 'debug asok' | grep 55
+ceph config set client.foo 'debug asok' 66
+ceph config get client.foo debug_asok | grep 66
+ceph config rm client.foo debug_asok
+ceph config set client.foo debug_asok 66
+ceph config rm client.foo 'debug asok'
+
+# help
+ceph config help debug_asok | grep debug_asok
+
+# show
+ceph config set osd.0 debug_asok 33
+while ! ceph config show osd.0 | grep debug_asok | grep 33 | grep mon
+do
+ sleep 1
+done
+ceph config set osd.0 debug_asok 22
+while ! ceph config show osd.0 | grep debug_asok | grep 22 | grep mon
+do
+ sleep 1
+done
+
+ceph tell osd.0 config set debug_asok 99
+while ! ceph config show osd.0 | grep debug_asok | grep 99
+do
+ sleep 1
+done
+ceph config show osd.0 | grep debug_asok | grep 'override mon'
+ceph tell osd.0 config unset debug_asok
+ceph tell osd.0 config unset debug_asok
+
+ceph config rm osd.0 debug_asok
+while ceph config show osd.0 | grep debug_asok | grep mon
+do
+ sleep 1
+done
+ceph config show osd.0 | grep -c debug_asok | grep 0
+
+ceph config set osd.0 osd_scrub_cost 123
+while ! ceph config show osd.0 | grep osd_scrub_cost | grep mon
+do
+ sleep 1
+done
+ceph config rm osd.0 osd_scrub_cost
+
+# show-with-defaults
+ceph config show-with-defaults osd.0 | grep debug_asok
+
+# assimilate
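+# assimilate-conf absorbs recognized options from a conf file into the
+# config database and prints the residue it could not absorb; below,
+# 'keyring' must survive in the output while debug_asok is absorbed.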
+t1=`mktemp`
+t2=`mktemp`
+cat <<EOF > $t1
+[osd.0]
+keyring = foo
+debug_asok = 66
+EOF
+ceph config assimilate-conf -i $t1 | tee $t2
+
+grep keyring $t2
+expect_false grep debug_asok $t2
+rm -f $t1 $t2
+
+expect_false ceph config reset
+expect_false ceph config reset -1
+# we are at end of testing, so it's okay to revert everything
+ceph config reset 0
+
+echo OK
diff --git a/qa/workunits/mon/crush_ops.sh b/qa/workunits/mon/crush_ops.sh
new file mode 100755
index 000000000..a68761985
--- /dev/null
+++ b/qa/workunits/mon/crush_ops.sh
@@ -0,0 +1,237 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+ceph osd crush dump
+
+# rules
+ceph osd crush rule dump
+ceph osd crush rule ls
+ceph osd crush rule list
+
+ceph osd crush rule create-simple foo default host
+ceph osd crush rule create-simple foo default host
+ceph osd crush rule create-simple bar default host
+
+ceph osd crush rm-device-class all
+ceph osd crush set-device-class ssd osd.0
+ceph osd crush set-device-class hdd osd.1
+ceph osd crush rule create-replicated foo-ssd default host ssd
+ceph osd crush rule create-replicated foo-hdd default host hdd
+ceph osd crush rule ls-by-class ssd | grep 'foo-ssd'
+ceph osd crush rule ls-by-class ssd | expect_false grep 'foo-hdd'
+ceph osd crush rule ls-by-class hdd | grep 'foo-hdd'
+ceph osd crush rule ls-by-class hdd | expect_false grep 'foo-ssd'
+
+ceph osd erasure-code-profile set ec-foo-ssd crush-device-class=ssd m=2 k=2
+ceph osd pool create ec-foo 2 erasure ec-foo-ssd
+ceph osd pool rm ec-foo ec-foo --yes-i-really-really-mean-it
+
+ceph osd crush rule ls | grep foo
+
+ceph osd crush rule rename foo foo-asdf
+ceph osd crush rule rename foo foo-asdf # idempotent
+ceph osd crush rule rename bar bar-asdf
+ceph osd crush rule ls | grep 'foo-asdf'
+ceph osd crush rule ls | grep 'bar-asdf'
+ceph osd crush rule rm foo 2>&1 | grep 'does not exist'
+ceph osd crush rule rm bar 2>&1 | grep 'does not exist'
+ceph osd crush rule rename foo-asdf foo
+ceph osd crush rule rename foo-asdf foo # idempotent
+ceph osd crush rule rename bar-asdf bar
+ceph osd crush rule ls | expect_false grep 'foo-asdf'
+ceph osd crush rule ls | expect_false grep 'bar-asdf'
+ceph osd crush rule rm foo
+ceph osd crush rule rm foo # idempotent
+ceph osd crush rule rm bar
+
+# can't delete in-use rules, tho:
+ceph osd pool create pinning_pool 1
+expect_false ceph osd crush rule rm replicated_rule
+ceph osd pool rm pinning_pool pinning_pool --yes-i-really-really-mean-it
+
+# build a simple map
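+# (a bucket can be linked under more than one root: 'crush link' adds a
+# reference, 'crush unlink' drops one, and 'crush rm' only succeeds once
+# the bucket is empty and unreferenced, as exercised below)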
+expect_false ceph osd crush add-bucket foo osd
+ceph osd crush add-bucket foo root
+o1=`ceph osd create`
+o2=`ceph osd create`
+ceph osd crush add $o1 1 host=host1 root=foo
+ceph osd crush add $o1 1 host=host1 root=foo # idempotent
+ceph osd crush add $o2 1 host=host2 root=foo
+ceph osd crush add $o2 1 host=host2 root=foo # idempotent
+ceph osd crush add-bucket bar root
+ceph osd crush add-bucket bar root # idempotent
+ceph osd crush link host1 root=bar
+ceph osd crush link host1 root=bar # idempotent
+ceph osd crush link host2 root=bar
+ceph osd crush link host2 root=bar # idempotent
+
+ceph osd tree | grep -c osd.$o1 | grep -q 2
+ceph osd tree | grep -c host1 | grep -q 2
+ceph osd tree | grep -c osd.$o2 | grep -q 2
+ceph osd tree | grep -c host2 | grep -q 2
+expect_false ceph osd crush rm host1 foo # not empty
+ceph osd crush unlink host1 foo
+ceph osd crush unlink host1 foo
+ceph osd tree | grep -c host1 | grep -q 1
+
+expect_false ceph osd crush rm foo # not empty
+expect_false ceph osd crush rm bar # not empty
+ceph osd crush unlink host1 bar
+ceph osd tree | grep -c host1 | grep -q 1 # now an orphan
+ceph osd crush rm osd.$o1 host1
+ceph osd crush rm host1
+ceph osd tree | grep -c host1 | grep -q 0
+expect_false ceph osd tree-from host1
+ceph osd tree-from host2
+expect_false ceph osd tree-from osd.$o2
+
+expect_false ceph osd crush rm bar # not empty
+ceph osd crush unlink host2
+
+ceph osd crush add-bucket host-for-test host root=root-for-test rack=rack-for-test
+ceph osd tree | grep host-for-test
+ceph osd tree | grep rack-for-test
+ceph osd tree | grep root-for-test
+ceph osd crush rm host-for-test
+ceph osd crush rm rack-for-test
+ceph osd crush rm root-for-test
+
+# reference foo and bar with a rule
+ceph osd crush rule create-simple foo-rule foo host firstn
+expect_false ceph osd crush rm foo
+ceph osd crush rule rm foo-rule
+
+ceph osd crush rm bar
+ceph osd crush rm foo
+ceph osd crush rm osd.$o2 host2
+ceph osd crush rm host2
+
+ceph osd crush add-bucket foo host
+ceph osd crush move foo root=default rack=localrack
+
+ceph osd crush create-or-move osd.$o1 1.0 root=default
+ceph osd crush move osd.$o1 host=foo
+ceph osd find osd.$o1 | grep host | grep foo
+
+ceph osd crush rm osd.$o1
+ceph osd crush rm osd.$o2
+
+ceph osd crush rm foo
+
+# test reweight
+o3=`ceph osd create`
+ceph osd crush add $o3 123 root=default
+ceph osd tree | grep osd.$o3 | grep 123
+ceph osd crush reweight osd.$o3 113
+expect_false ceph osd crush reweight osd.$o3 123456
+ceph osd tree | grep osd.$o3 | grep 113
+ceph osd crush rm osd.$o3
+ceph osd rm osd.$o3
+
+# test reweight-subtree
+o4=`ceph osd create`
+o5=`ceph osd create`
+ceph osd crush add $o4 123 root=default host=foobaz
+ceph osd crush add $o5 123 root=default host=foobaz
+ceph osd tree | grep osd.$o4 | grep 123
+ceph osd tree | grep osd.$o5 | grep 123
+ceph osd crush reweight-subtree foobaz 155
+expect_false ceph osd crush reweight-subtree foobaz 123456
+ceph osd tree | grep osd.$o4 | grep 155
+ceph osd tree | grep osd.$o5 | grep 155
+ceph osd crush rm osd.$o4
+ceph osd crush rm osd.$o5
+ceph osd rm osd.$o4
+ceph osd rm osd.$o5
+
+# weight sets
+# make sure we require luminous before testing weight-sets
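+# (a flat weight set stores one weight per item, while a positional weight
+# set stores one weight per replica position; barset backs a size-3 pool,
+# so its reweight below takes three values)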
+ceph osd set-require-min-compat-client luminous
+ceph osd crush weight-set dump
+ceph osd crush weight-set ls
+expect_false ceph osd crush weight-set reweight fooset osd.0 .9
+ceph osd pool create fooset 8
+ceph osd pool create barset 8
+ceph osd pool set barset size 3
+expect_false ceph osd crush weight-set reweight fooset osd.0 .9
+ceph osd crush weight-set create fooset flat
+ceph osd crush weight-set create barset positional
+ceph osd crush weight-set ls | grep fooset
+ceph osd crush weight-set ls | grep barset
+ceph osd crush weight-set dump
+ceph osd crush weight-set reweight fooset osd.0 .9
+expect_false ceph osd crush weight-set reweight fooset osd.0 .9 .9
+expect_false ceph osd crush weight-set reweight barset osd.0 .9
+ceph osd crush weight-set reweight barset osd.0 .9 .9 .9
+ceph osd crush weight-set ls | grep -c fooset | grep -q 1
+ceph osd crush weight-set rm fooset
+ceph osd crush weight-set ls | grep -c fooset | grep -q 0
+ceph osd crush weight-set ls | grep barset
+ceph osd crush weight-set rm barset
+ceph osd crush weight-set ls | grep -c barset | grep -q 0
+ceph osd crush weight-set create-compat
+ceph osd crush weight-set ls | grep '(compat)'
+ceph osd crush weight-set rm-compat
+
+# weight set vs device classes
+ceph osd pool create cool 2
+ceph osd pool create cold 2
+ceph osd pool set cold size 2
+ceph osd crush weight-set create-compat
+ceph osd crush weight-set create cool flat
+ceph osd crush weight-set create cold positional
+ceph osd crush rm-device-class osd.0
+ceph osd crush weight-set reweight-compat osd.0 10.5
+ceph osd crush weight-set reweight cool osd.0 11.5
+ceph osd crush weight-set reweight cold osd.0 12.5 12.4
+ceph osd crush set-device-class fish osd.0
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 10\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 11\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 12\\.
+ceph osd crush rm-device-class osd.0
+ceph osd crush set-device-class globster osd.0
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 10\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 11\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 12\\.
+ceph osd crush weight-set reweight-compat osd.0 7.5
+ceph osd crush weight-set reweight cool osd.0 8.5
+ceph osd crush weight-set reweight cold osd.0 6.5 6.6
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 7\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 8\\.
+ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 6\\.
+ceph osd crush rm-device-class osd.0
+ceph osd pool rm cool cool --yes-i-really-really-mean-it
+ceph osd pool rm cold cold --yes-i-really-really-mean-it
+ceph osd crush weight-set rm-compat
+
+# weight set vs device classes vs move
+ceph osd crush weight-set create-compat
+ceph osd crush add-bucket fooo host
+ceph osd crush move fooo root=default
+ceph osd crush add-bucket barr rack
+ceph osd crush move barr root=default
+ceph osd crush move fooo rack=barr
+ceph osd crush rm fooo
+ceph osd crush rm barr
+ceph osd crush weight-set rm-compat
+
+# this sequence would crash at one point
+ceph osd crush weight-set create-compat
+ceph osd crush add-bucket r1 rack root=default
+for f in `seq 1 32`; do
+ ceph osd crush add-bucket h$f host rack=r1
+done
+for f in `seq 1 32`; do
+ ceph osd crush rm h$f
+done
+ceph osd crush rm r1
+ceph osd crush weight-set rm-compat
+
+echo OK
diff --git a/qa/workunits/mon/osd.sh b/qa/workunits/mon/osd.sh
new file mode 100755
index 000000000..535d6c137
--- /dev/null
+++ b/qa/workunits/mon/osd.sh
@@ -0,0 +1,24 @@
+#!/bin/sh -x
+
+set -e
+
+ua=`uuidgen`
+ub=`uuidgen`
+
+# should get same id with same uuid
+na=`ceph osd create $ua`
+test $na -eq `ceph osd create $ua`
+
+nb=`ceph osd create $ub`
+test $nb -eq `ceph osd create $ub`
+test $nb -ne $na
+
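+# osd rm is idempotent: removing an already-removed or never-allocated id
+# must also succeed, since this script runs under set -e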
+ceph osd rm $na
+ceph osd rm $na
+ceph osd rm $nb
+ceph osd rm 1000
+
+na2=`ceph osd create $ua`
+
+echo OK
+
diff --git a/qa/workunits/mon/pg_autoscaler.sh b/qa/workunits/mon/pg_autoscaler.sh
new file mode 100755
index 000000000..4cf71a31c
--- /dev/null
+++ b/qa/workunits/mon/pg_autoscaler.sh
@@ -0,0 +1,156 @@
+#!/bin/bash -ex
+
+NUM_OSDS=$(ceph osd ls | wc -l)
+if [ $NUM_OSDS -lt 6 ]; then
+ echo "test requires at least 6 OSDs"
+ exit 1
+fi
+
+NUM_POOLS=$(ceph osd pool ls | wc -l)
+if [ $NUM_POOLS -gt 0 ]; then
+ echo "test requires no preexisting pools"
+ exit 1
+fi
+
+function wait_for() {
+ local sec=$1
+ local cmd=$2
+
+ while true ; do
+ if bash -c "$cmd" ; then
+ break
+ fi
+ sec=$(( $sec - 1 ))
+ if [ $sec -eq 0 ]; then
+ echo failed
+ return 1
+ fi
+ sleep 1
+ done
+ return 0
+}
+
+function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l;}
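+# power2 rounds its argument to the nearest power of two in log space,
+# e.g. power2 100 -> 128 (log2(100) ~= 6.64; adding 0.5 and truncating
+# gives 7, and 2^7 = 128).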
+
+function eval_actual_expected_val() {
+ local actual_value=$1
+ local expected_value=$2
+ if [[ $actual_value = $expected_value ]]
+ then
+ echo "Success: " $actual_value "=" $expected_value
+ else
+ echo "Error: " $actual_value "!=" $expected_value
+ exit 1
+ fi
+}
+
+# enable
+ceph config set mgr mgr/pg_autoscaler/sleep_interval 60
+ceph mgr module enable pg_autoscaler
+# ceph config set global osd_pool_default_pg_autoscale_mode on
+
+# pg_num_min
+ceph osd pool create meta0 16
+ceph osd pool create bulk0 16 --bulk
+ceph osd pool create bulk1 16 --bulk
+ceph osd pool create bulk2 16 --bulk
+ceph osd pool set meta0 pg_autoscale_mode on
+ceph osd pool set bulk0 pg_autoscale_mode on
+ceph osd pool set bulk1 pg_autoscale_mode on
+ceph osd pool set bulk2 pg_autoscale_mode on
+# set pool size
+ceph osd pool set meta0 size 2
+ceph osd pool set bulk0 size 2
+ceph osd pool set bulk1 size 2
+ceph osd pool set bulk2 size 2
+
+# get num pools again since we created more pools
+NUM_POOLS=$(ceph osd pool ls | wc -l)
+
+# get bulk flag of each pool through the command ceph osd pool autoscale-status
+BULK_FLAG_1=$(ceph osd pool autoscale-status | grep 'meta0' | grep -o -m 1 'True\|False' || true)
+BULK_FLAG_2=$(ceph osd pool autoscale-status | grep 'bulk0' | grep -o -m 1 'True\|False' || true)
+BULK_FLAG_3=$(ceph osd pool autoscale-status | grep 'bulk1' | grep -o -m 1 'True\|False' || true)
+BULK_FLAG_4=$(ceph osd pool autoscale-status | grep 'bulk2' | grep -o -m 1 'True\|False' || true)
+
+# evaluate the accuracy of ceph osd pool autoscale-status, specifically the `BULK` column
+
+eval_actual_expected_val $BULK_FLAG_1 'False'
+eval_actual_expected_val $BULK_FLAG_2 'True'
+eval_actual_expected_val $BULK_FLAG_3 'True'
+eval_actual_expected_val $BULK_FLAG_4 'True'
+
+# This part of this code will now evaluate the accuracy of the autoscaler
+
+# get pool size
+POOL_SIZE_1=$(ceph osd pool get meta0 size| grep -Eo '[0-9]{1,4}')
+POOL_SIZE_2=$(ceph osd pool get bulk0 size| grep -Eo '[0-9]{1,4}')
+POOL_SIZE_3=$(ceph osd pool get bulk1 size| grep -Eo '[0-9]{1,4}')
+POOL_SIZE_4=$(ceph osd pool get bulk2 size| grep -Eo '[0-9]{1,4}')
+
+# Calculate the target pg_num of each pool.
+# The first pool is non-bulk, so we handle it first: since its capacity
+# ratio is 0, the meta pool keeps its current pg_num.
+
+TARGET_PG_1=$(ceph osd pool get meta0 pg_num| grep -Eo '[0-9]{1,4}')
+PG_LEFT=$((NUM_OSDS * 100))
+NUM_POOLS_LEFT=$((NUM_POOLS - 1))
+# The remaining pools are bulk and evenly sized, so the calculation is
+# straightforward.
+TARGET_PG_2=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_2))))
+TARGET_PG_3=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_3))))
+TARGET_PG_4=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_4))))
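+# Worked example, assuming exactly 6 OSDs and only the 4 pools created
+# above: PG_LEFT = 600 and there are 3 bulk pools of size 2, so each bulk
+# pool targets power2(600/3/2) = power2(100) = 128 PGs.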
+
+# evaluate target_pg against the pg_num of each pool
+wait_for 300 "ceph osd pool get meta0 pg_num | grep $TARGET_PG_1"
+wait_for 300 "ceph osd pool get bulk0 pg_num | grep $TARGET_PG_2"
+wait_for 300 "ceph osd pool get bulk1 pg_num | grep $TARGET_PG_3"
+wait_for 300 "ceph osd pool get bulk2 pg_num | grep $TARGET_PG_4"
+
+# target ratio
+ceph osd pool set meta0 target_size_ratio 5
+ceph osd pool set bulk0 target_size_ratio 1
+sleep 60
+APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
+BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
+test $APGS -gt 100
+test $BPGS -gt 10
+
+# small ratio change does not change pg_num
+ceph osd pool set meta0 target_size_ratio 7
+ceph osd pool set bulk0 target_size_ratio 2
+sleep 60
+APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
+BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
+test $APGS -eq $APGS2
+test $BPGS -eq $BPGS2
+
+# target_size
+ceph osd pool set meta0 target_size_bytes 1000000000000000
+ceph osd pool set bulk0 target_size_bytes 1000000000000000
+ceph osd pool set meta0 target_size_ratio 0
+ceph osd pool set bulk0 target_size_ratio 0
+wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
+
+ceph osd pool set meta0 target_size_bytes 1000
+ceph osd pool set bulk0 target_size_bytes 1000
+ceph osd pool set meta0 target_size_ratio 1
+wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
+
+# test autoscale warn
+
+ceph osd pool create warn0 1 --autoscale-mode=warn
+wait_for 120 "ceph health detail | grep POOL_TOO_FEW_PGS"
+
+ceph osd pool create warn1 256 --autoscale-mode=warn
+wait_for 120 "ceph health detail | grep POOL_TOO_MANY_PGS"
+
+ceph osd pool rm meta0 meta0 --yes-i-really-really-mean-it
+ceph osd pool rm bulk0 bulk0 --yes-i-really-really-mean-it
+ceph osd pool rm bulk1 bulk1 --yes-i-really-really-mean-it
+ceph osd pool rm bulk2 bulk2 --yes-i-really-really-mean-it
+ceph osd pool rm warn0 warn0 --yes-i-really-really-mean-it
+ceph osd pool rm warn1 warn1 --yes-i-really-really-mean-it
+
+echo OK
+
diff --git a/qa/workunits/mon/ping.py b/qa/workunits/mon/ping.py
new file mode 100755
index 000000000..1f6d0a1dd
--- /dev/null
+++ b/qa/workunits/mon/ping.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python3
+
+import json
+import shlex
+import subprocess
+
+
+class UnexpectedReturn(Exception):
+ def __init__(self, cmd, ret, expected, msg):
+ if isinstance(cmd, list):
+ self.cmd = ' '.join(cmd)
+ else:
+ assert isinstance(cmd, str), \
+ 'cmd needs to be either a list or a str'
+ self.cmd = cmd
+ self.ret = int(ret)
+ self.expected = int(expected)
+ self.msg = str(msg)
+
+ def __str__(self):
+ return repr('{c}: expected return {e}, got {r} ({o})'.format(
+ c=self.cmd, e=self.expected, r=self.ret, o=self.msg))
+
+
+def call(cmd):
+ if isinstance(cmd, list):
+ args = cmd
+ elif isinstance(cmd, str):
+ args = shlex.split(cmd)
+ else:
+ assert False, 'cmd is neither a str nor a list!'
+
+ print('call: {0}'.format(args))
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ procout, procerr = proc.communicate(None)
+
+ return proc.returncode, procout, procerr
+
+
+def expect(cmd, expected_ret):
+ try:
+ (r, out, err) = call(cmd)
+ except ValueError as e:
+ assert False, \
+ 'unable to run {c}: {err}'.format(c=repr(cmd), err=str(e))
+
+ if r != expected_ret:
+ raise UnexpectedReturn(repr(cmd), r, expected_ret, err)
+
+ return out.decode() if isinstance(out, bytes) else out
+
+
+def get_quorum_status(timeout=300):
+ cmd = 'ceph quorum_status'
+ if timeout > 0:
+ cmd += ' --connect-timeout {0}'.format(timeout)
+
+ out = expect(cmd, 0)
+ j = json.loads(out)
+ return j
+
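+# The quorum_status JSON carries 'quorum_names' (the monitors currently in
+# quorum) and 'monmap' -> 'mons', whose entries include each monitor's
+# 'name'; main() below relies on both fields.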
+
+def main():
+ quorum_status = get_quorum_status()
+ mon_names = [mon['name'] for mon in quorum_status['monmap']['mons']]
+
+ print('ping all monitors')
+ for m in mon_names:
+ print('ping mon.{0}'.format(m))
+ out = expect('ceph ping mon.{0}'.format(m), 0)
+ reply = json.loads(out)
+
+ assert reply['mon_status']['name'] == m, \
+ 'reply obtained from mon.{0}, expected mon.{1}'.format(
+ reply['mon_status']['name'], m)
+
+ print('test out-of-quorum reply')
+ for m in mon_names:
+ print('testing mon.{0}'.format(m))
+ expect('ceph daemon mon.{0} quorum exit'.format(m), 0)
+
+ quorum_status = get_quorum_status()
+ assert m not in quorum_status['quorum_names'], \
+ 'mon.{0} was not supposed to be in quorum ({1})'.format(
+ m, quorum_status['quorum_names'])
+
+ out = expect('ceph ping mon.{0}'.format(m), 0)
+ reply = json.loads(out)
+ mon_status = reply['mon_status']
+
+ assert mon_status['name'] == m, \
+ 'reply obtained from mon.{0}, expected mon.{1}'.format(
+ mon_status['name'], m)
+
+ assert mon_status['state'] == 'electing', \
+ 'mon.{0} is in state {1}, expected electing'.format(
+ m, mon_status['state'])
+
+ expect('ceph daemon mon.{0} quorum enter'.format(m), 0)
+
+ print('OK')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/mon/pool_ops.sh b/qa/workunits/mon/pool_ops.sh
new file mode 100755
index 000000000..23bb3c0be
--- /dev/null
+++ b/qa/workunits/mon/pool_ops.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function get_config_value_or_die()
+{
+ local pool_name config_opt raw val
+
+ pool_name=$1
+ config_opt=$2
+
+ raw="`$SUDO ceph osd pool get $pool_name $config_opt 2>/dev/null`"
+ if [[ $? -ne 0 ]]; then
+ echo "error obtaining config opt '$config_opt' from '$pool_name': $raw"
+ exit 1
+ fi
+
+ raw=`echo $raw | sed -e 's/[{} "]//g'`
+ val=`echo $raw | cut -f2 -d:`
+
+ echo "$val"
+ return 0
+}
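+
+# e.g. `ceph osd pool get foo size` prints "size: 2"; the sed/cut above
+# strip the decoration and yield just "2".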
+
+function expect_config_value()
+{
+ local pool_name config_opt expected_val val
+ pool_name=$1
+ config_opt=$2
+ expected_val=$3
+
+ val=$(get_config_value_or_die $pool_name $config_opt)
+
+ if [[ "$val" != "$expected_val" ]]; then
+ echo "expected '$expected_val', got '$val'"
+ exit 1
+ fi
+}
+
+# pg_num min/max
+TEST_POOL=testpool1234
+ceph osd pool create testpool1234 8 --autoscale-mode off
+ceph osd pool set $TEST_POOL pg_num_min 2
+ceph osd pool get $TEST_POOL pg_num_min | grep 2
+ceph osd pool set $TEST_POOL pg_num_max 33
+ceph osd pool get $TEST_POOL pg_num_max | grep 33
+expect_false ceph osd pool set $TEST_POOL pg_num_min 9
+expect_false ceph osd pool set $TEST_POOL pg_num_max 7
+expect_false ceph osd pool set $TEST_POOL pg_num 1
+expect_false ceph osd pool set $TEST_POOL pg_num 44
+ceph osd pool set $TEST_POOL pg_num_min 0
+expect_false ceph osd pool get $TEST_POOL pg_num_min
+ceph osd pool set $TEST_POOL pg_num_max 0
+expect_false ceph osd pool get $TEST_POOL pg_num_max
+ceph osd pool delete $TEST_POOL $TEST_POOL --yes-i-really-really-mean-it
+
+# note: we need to pass the other args, or ceph_argparse.py will treat
+# 'invalid' (which is not replicated|erasure) as the next argument,
+# which is a string.
+expect_false ceph osd pool create foo 123 123 invalid foo-profile foo-rule
+
+ceph osd pool create foo 123 123 replicated
+ceph osd pool create fooo 123 123 erasure default
+ceph osd pool create foooo 123
+
+ceph osd pool create foo 123 # idempotent
+
+ceph osd pool set foo size 1 --yes-i-really-mean-it
+expect_config_value "foo" "min_size" 1
+ceph osd pool set foo size 4
+expect_config_value "foo" "min_size" 2
+ceph osd pool set foo size 10
+expect_config_value "foo" "min_size" 5
+expect_false ceph osd pool set foo size 0
+expect_false ceph osd pool set foo size 20
+
+# should fail due to safety interlock
+expect_false ceph osd pool delete foo
+expect_false ceph osd pool delete foo foo
+expect_false ceph osd pool delete foo foo --force
+expect_false ceph osd pool delete foo fooo --yes-i-really-mean-it
+expect_false ceph osd pool delete foo --yes-i-really-mean-it foo
+
+ceph osd pool delete foooo foooo --yes-i-really-really-mean-it
+ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
+ceph osd pool delete foo foo --yes-i-really-really-mean-it
+
+# idempotent
+ceph osd pool delete foo foo --yes-i-really-really-mean-it
+ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
+ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
+
+# non-existent pool
+ceph osd pool delete fuggg fuggg --yes-i-really-really-mean-it
+
+echo OK
+
+
diff --git a/qa/workunits/mon/rbd_snaps_ops.sh b/qa/workunits/mon/rbd_snaps_ops.sh
new file mode 100755
index 000000000..eb88565ea
--- /dev/null
+++ b/qa/workunits/mon/rbd_snaps_ops.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# attempt to trigger #6047
+
+
+cmd_no=0
+expect()
+{
+ cmd_no=$(($cmd_no+1))
+ cmd="$1"
+ expected=$2
+ echo "[$cmd_no] $cmd"
+ eval $cmd
+ ret=$?
+ if [[ $ret -ne $expected ]]; then
+ echo "[$cmd_no] unexpected return '$ret', expected '$expected'"
+ exit 1
+ fi
+}
+
+ceph osd pool delete test test --yes-i-really-really-mean-it || true
+expect 'ceph osd pool create test 8 8' 0
+expect 'ceph osd pool application enable test rbd' 0
+expect 'ceph osd pool mksnap test snapshot' 0
+expect 'ceph osd pool rmsnap test snapshot' 0
+
+expect 'rbd --pool=test --rbd_validate_pool=false create --size=102400 image' 0
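+# the pool has been in pool-snaps mode since the mksnap above, so creating
+# a self-managed RBD snapshot is expected to fail with 22 (EINVAL)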
+expect 'rbd --pool=test snap create image@snapshot' 22
+
+expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
+expect 'ceph osd pool create test 8 8' 0
+expect 'rbd --pool=test pool init' 0
+expect 'rbd --pool=test create --size=102400 image' 0
+expect 'rbd --pool=test snap create image@snapshot' 0
+expect 'rbd --pool=test snap ls image' 0
+expect 'rbd --pool=test snap rm image@snapshot' 0
+
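+# conversely, the pool is now in self-managed-snaps mode, so a pool-level
+# mksnap is expected to fail with 22 (EINVAL)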
+expect 'ceph osd pool mksnap test snapshot' 22
+
+expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
+
+# reproduce 7210 and expect it to be fixed
+# basically create such a scenario where we end up deleting what used to
+# be an unmanaged snapshot from a not-unmanaged pool
+
+ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it || true
+expect 'ceph osd pool create test-foo 8' 0
+expect 'ceph osd pool application enable test-foo rbd' 0
+expect 'rbd --pool test-foo create --size 1024 image' 0
+expect 'rbd --pool test-foo snap create image@snapshot' 0
+
+ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true
+expect 'ceph osd pool create test-bar 8' 0
+expect 'ceph osd pool application enable test-bar rbd' 0
+expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0
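+# rados cppool does not carry over self-managed-snaps state, so removing
+# the copied snapshot is expected to fail cleanly with 95 (EOPNOTSUPP)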
+expect 'rbd --pool test-bar snap rm image@snapshot' 95
+expect 'ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it' 0
+expect 'ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it' 0
+
+
+echo OK
diff --git a/qa/workunits/mon/test_config_key_caps.sh b/qa/workunits/mon/test_config_key_caps.sh
new file mode 100755
index 000000000..77b4b53b7
--- /dev/null
+++ b/qa/workunits/mon/test_config_key_caps.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+set -x
+set -e
+
+tmp=$(mktemp -d -p /tmp test_mon_config_key_caps.XXXXX)
+entities=()
+
+function cleanup()
+{
+ set +e
+ set +x
+ if [[ -e $tmp/keyring ]] && [[ -e $tmp/keyring.orig ]]; then
+ grep '\[.*\..*\]' $tmp/keyring.orig > $tmp/entities.orig
+ for e in $(grep '\[.*\..*\]' $tmp/keyring | \
+ diff $tmp/entities.orig - | \
+ sed -n 's/^.*\[\(.*\..*\)\]/\1/p');
+ do
+ ceph auth rm $e >& /dev/null
+ done
+ fi
+ #rm -fr $tmp
+}
+
+trap cleanup 0 # cleanup on exit
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+# for cleanup purposes
+ceph auth export -o $tmp/keyring.orig
+
+k=$tmp/keyring
+
+# setup a few keys
+ceph config-key ls
+ceph config-key set daemon-private/osd.123/test-foo
+ceph config-key set mgr/test-foo
+ceph config-key set device/test-foo
+ceph config-key set test/foo
+
+allow_aa=client.allow_aa
+allow_bb=client.allow_bb
+allow_cc=client.allow_cc
+
+mgr_a=mgr.a
+mgr_b=mgr.b
+osd_a=osd.100
+osd_b=osd.200
+
+prefix_aa=client.prefix_aa
+prefix_bb=client.prefix_bb
+prefix_cc=client.prefix_cc
+match_aa=client.match_aa
+match_bb=client.match_bb
+
+fail_aa=client.fail_aa
+fail_bb=client.fail_bb
+fail_cc=client.fail_cc
+fail_dd=client.fail_dd
+fail_ee=client.fail_ee
+fail_ff=client.fail_ff
+fail_gg=client.fail_gg
+fail_writes=client.fail_writes
+
+ceph auth get-or-create $allow_aa mon 'allow *'
+ceph auth get-or-create $allow_bb mon 'allow service config-key rwx'
+ceph auth get-or-create $allow_cc mon 'allow command "config-key get"'
+
+ceph auth get-or-create $mgr_a mon 'allow profile mgr'
+ceph auth get-or-create $mgr_b mon 'allow profile mgr'
+ceph auth get-or-create $osd_a mon 'allow profile osd'
+ceph auth get-or-create $osd_b mon 'allow profile osd'
+
+ceph auth get-or-create $prefix_aa mon \
+ "allow command \"config-key get\" with key prefix client/$prefix_aa"
+
+cap="allow command \"config-key set\" with key prefix client/"
+cap="$cap,allow command \"config-key get\" with key prefix client/$prefix_bb"
+ceph auth get-or-create $prefix_bb mon "$cap"
+
+cap="allow command \"config-key get\" with key prefix client/"
+cap="$cap, allow command \"config-key set\" with key prefix client/"
+cap="$cap, allow command \"config-key ls\""
+ceph auth get-or-create $prefix_cc mon "$cap"
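+
+# 'with key prefix <p>' caps restrict a command to keys under that prefix:
+# $prefix_aa may only read its own client/ subtree, $prefix_bb may write
+# anywhere under client/ but read only its own subtree, and $prefix_cc may
+# read, write, and list keys.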
+
+cap="allow command \"config-key get\" with key=client/$match_aa/foo"
+ceph auth get-or-create $match_aa mon "$cap"
+cap="allow command \"config-key get\" with key=client/$match_bb/foo"
+cap="$cap,allow command \"config-key set\" with key=client/$match_bb/foo"
+ceph auth get-or-create $match_bb mon "$cap"
+
+ceph auth get-or-create $fail_aa mon 'allow rx'
+ceph auth get-or-create $fail_bb mon 'allow r,allow w'
+ceph auth get-or-create $fail_cc mon 'allow rw'
+ceph auth get-or-create $fail_dd mon 'allow rwx'
+ceph auth get-or-create $fail_ee mon 'allow profile bootstrap-rgw'
+ceph auth get-or-create $fail_ff mon 'allow profile bootstrap-rbd'
+# write commands will require rw; wx is not enough
+ceph auth get-or-create $fail_gg mon 'allow service config-key wx'
+# read commands will only require 'r'; 'rx' should be enough.
+ceph auth get-or-create $fail_writes mon 'allow service config-key rx'
+
+# grab keyring
+ceph auth export -o $k
+
+# keys with all the caps can do whatever
+for c in $allow_aa $allow_bb $allow_cc $mgr_a $mgr_b; do
+ ceph -k $k --name $c config-key get daemon-private/osd.123/test-foo
+ ceph -k $k --name $c config-key get mgr/test-foo
+ ceph -k $k --name $c config-key get device/test-foo
+ ceph -k $k --name $c config-key get test/foo
+done
+
+for c in $osd_a $osd_b; do
+ ceph -k $k --name $c config-key put daemon-private/$c/test-foo
+ ceph -k $k --name $c config-key get daemon-private/$c/test-foo
+ expect_false ceph -k $k --name $c config-key ls
+ expect_false ceph -k $k --name $c config-key get mgr/test-foo
+ expect_false ceph -k $k --name $c config-key get device/test-foo
+ expect_false ceph -k $k --name $c config-key get test/foo
+done
+
+expect_false ceph -k $k --name $osd_a config-key get daemon-private/$osd_b/test-foo
+expect_false ceph -k $k --name $osd_b config-key get daemon-private/$osd_a/test-foo
+
+expect_false ceph -k $k --name $prefix_aa \
+ config-key ls
+expect_false ceph -k $k --name $prefix_aa \
+ config-key get daemon-private/osd.123/test-foo
+expect_false ceph -k $k --name $prefix_aa \
+ config-key set test/bar
+expect_false ceph -k $k --name $prefix_aa \
+ config-key set client/$prefix_aa/foo
+
+# write something so we can read, use a custom entity
+ceph -k $k --name $allow_bb config-key set client/$prefix_aa/foo
+ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/foo
+# check that one entity can write to the other's prefix and that the other can read it back
+ceph -k $k --name $prefix_bb config-key set client/$prefix_aa/bar
+ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/bar
+
+ceph -k $k --name $prefix_bb config-key set client/$prefix_bb/foo
+ceph -k $k --name $prefix_bb config-key get client/$prefix_bb/foo
+
+expect_false ceph -k $k --name $prefix_bb config-key get client/$prefix_aa/bar
+expect_false ceph -k $k --name $prefix_bb config-key ls
+expect_false ceph -k $k --name $prefix_bb \
+ config-key get daemon-private/osd.123/test-foo
+expect_false ceph -k $k --name $prefix_bb config-key get mgr/test-foo
+expect_false ceph -k $k --name $prefix_bb config-key get device/test-foo
+expect_false ceph -k $k --name $prefix_bb config-key get test/bar
+expect_false ceph -k $k --name $prefix_bb config-key set test/bar
+
+ceph -k $k --name $prefix_cc config-key set client/$match_aa/foo
+ceph -k $k --name $prefix_cc config-key set client/$match_bb/foo
+ceph -k $k --name $prefix_cc config-key get client/$match_aa/foo
+ceph -k $k --name $prefix_cc config-key get client/$match_bb/foo
+expect_false ceph -k $k --name $prefix_cc config-key set other/prefix
+expect_false ceph -k $k --name $prefix_cc config-key get mgr/test-foo
+ceph -k $k --name $prefix_cc config-key ls >& /dev/null
+
+ceph -k $k --name $match_aa config-key get client/$match_aa/foo
+expect_false ceph -k $k --name $match_aa config-key get client/$match_bb/foo
+expect_false ceph -k $k --name $match_aa config-key set client/$match_aa/foo
+ceph -k $k --name $match_bb config-key get client/$match_bb/foo
+ceph -k $k --name $match_bb config-key set client/$match_bb/foo
+expect_false ceph -k $k --name $match_bb config-key get client/$match_aa/foo
+expect_false ceph -k $k --name $match_bb config-key set client/$match_aa/foo
+
+keys=(daemon-private/osd.123/test-foo
+ mgr/test-foo
+ device/test-foo
+ test/foo
+ client/$prefix_aa/foo
+ client/$prefix_bb/foo
+ client/$match_aa/foo
+ client/$match_bb/foo
+)
+# expect these all to fail accessing config-key
+for c in $fail_aa $fail_bb $fail_cc \
+ $fail_dd $fail_ee $fail_ff \
+ $fail_gg; do
+ for m in get set; do
+ for key in ${keys[*]} client/$prefix_aa/foo client/$prefix_bb/foo; do
+ expect_false ceph -k $k --name $c config-key $m $key
+ done
+ done
+done
+
+# fail writes but succeed on reads
+expect_false ceph -k $k --name $fail_writes config-key set client/$match_aa/foo
+expect_false ceph -k $k --name $fail_writes config-key set test/foo
+ceph -k $k --name $fail_writes config-key ls
+ceph -k $k --name $fail_writes config-key get client/$match_aa/foo
+ceph -k $k --name $fail_writes config-key get daemon-private/osd.123/test-foo
+
+echo "OK"
diff --git a/qa/workunits/mon/test_mon_config_key.py b/qa/workunits/mon/test_mon_config_key.py
new file mode 100755
index 000000000..f81804c8a
--- /dev/null
+++ b/qa/workunits/mon/test_mon_config_key.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python3
+#
+# test_mon_config_key - Test 'ceph config-key' interface
+#
+# Copyright (C) 2013 Inktank
+#
+# This is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License version 2.1, as published by the Free Software
+# Foundation. See file COPYING.
+#
+import argparse
+import base64
+import errno
+import json
+import logging
+import os
+import random
+import string
+import subprocess
+import sys
+import time
+from typing import List, Dict
+
+#
+# Accepted Environment variables:
+# CEPH_TEST_VERBOSE - be more verbose; '1' enables; '0' disables
+# CEPH_TEST_DURATION - test duration in seconds
+# CEPH_TEST_SEED - seed to be used during the test
+#
+# Accepted arguments and options (see --help):
+# -v, --verbose - be more verbose
+# -d, --duration SECS - test duration in seconds
+# -s, --seed SEED - seed to be used during the test
+#
+
+
+LOG = logging.getLogger(os.path.basename(sys.argv[0].replace('.py', '')))
+
+SIZES = [
+ (0, 0),
+ (10, 0),
+ (25, 0),
+ (50, 0),
+ (100, 0),
+ (1000, 0),
+ (64 * 1024, 0),
+ (64 * 1024 + 1, -errno.EFBIG),
+ (128 * 1024, -errno.EFBIG)
+]
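+
+# (values up to 64 KiB are expected to store successfully; larger payloads
+# should be rejected with EFBIG -- a negative second element means "expect
+# this errno", and run_cmd below negates it)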
+
+# tests will be randomly selected from the keys here, and the test
+# suboperation will be randomly selected from the list in the values
+# here. i.e. 'exists/existing' would test that a key the test put into
+# the store earlier actually does still exist in the config store,
+# and that's a separate test case from 'exists/enoent', which tests
+# nonexistence of a key known to not be present.
+
+OPS = {
+ 'put': ['existing', 'new'],
+ 'del': ['existing', 'enoent'],
+ 'exists': ['existing', 'enoent'],
+ 'get': ['existing', 'enoent'],
+ 'list': ['existing', 'enoent'],
+ 'dump': ['existing', 'enoent'],
+}
+
+CONFIG_PUT: List[str] = [] # list: keys
+CONFIG_DEL: List[str] = [] # list: keys
+CONFIG_EXISTING: Dict[str, int] = {} # map: key -> size
+
+
+def run_cmd(cmd, expects=0):
+ full_cmd = ['ceph', 'config-key'] + cmd
+
+ if expects < 0:
+ expects = -expects
+
+ cmdlog = LOG.getChild('run_cmd')
+ cmdlog.debug('{fc}'.format(fc=' '.join(full_cmd)))
+
+ proc = subprocess.run(full_cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ if proc.returncode != expects:
+ cmdlog.error(f'cmd > {proc.args}')
+ cmdlog.error(f'expected return "{expects}" got "{proc.returncode}"')
+ cmdlog.error('stdout')
+ cmdlog.error(proc.stdout)
+ cmdlog.error('stderr')
+ cmdlog.error(proc.stderr)
+ sys.exit(1)  # a mismatch is a real test failure; abort instead of just logging
+
+
+def gen_data(size, rnd):
+ chars = string.ascii_letters + string.digits
+ return ''.join(rnd.choice(chars) for _ in range(size))
+
+
+def gen_key(rnd):
+ return gen_data(20, rnd)
+
+
+def gen_tmp_file_path(rnd):
+ file_name = gen_data(20, rnd)
+ file_path = os.path.join('/tmp', 'ceph-test.' + file_name)
+ return file_path
+
+
+def destroy_tmp_file(fpath):
+ if os.path.exists(fpath) and os.path.isfile(fpath):
+ os.unlink(fpath)
+
+
+def write_data_file(data, rnd):
+ file_path = gen_tmp_file_path(rnd)
+ data_file = open(file_path, 'w')  # 'w' truncates; 'a+' plus truncate() at EOF was a no-op
+ data_file.write(data)
+ data_file.close()
+ return file_path
+
+
+# end write_data_file
+
+def choose_random_op(rnd):
+ op = rnd.choice(
+ list(OPS.keys())
+ )
+ sop = rnd.choice(OPS[op])
+ return op, sop
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(
+ description="Test the monitor's 'config-key' API",
+ )
+ parser.add_argument(
+ '-v', '--verbose',
+ action='store_true',
+ help='be more verbose',
+ )
+ parser.add_argument(
+ '-s', '--seed',
+ metavar='SEED',
+ help='use SEED instead of generating it in run-time',
+ )
+ parser.add_argument(
+ '-d', '--duration',
+ metavar='SECS',
+ help='run test for SECS seconds (default: 300)',
+ )
+ parser.set_defaults(
+ seed=None,
+ duration=300,
+ verbose=False,
+ )
+ return parser.parse_args(args)
+
+
+def main():
+ args = parse_args(sys.argv[1:])
+
+ verbose = args.verbose
+ if os.environ.get('CEPH_TEST_VERBOSE') is not None:
+ verbose = (os.environ.get('CEPH_TEST_VERBOSE') == '1')
+
+ duration = int(os.environ.get('CEPH_TEST_DURATION', args.duration))
+ seed = os.environ.get('CEPH_TEST_SEED', args.seed)
+ seed = int(time.time()) if seed is None else int(seed)
+
+ rnd = random.Random()
+ rnd.seed(seed)
+
+ loglevel = logging.INFO
+ if verbose:
+ loglevel = logging.DEBUG
+
+ logging.basicConfig(level=loglevel)
+
+ LOG.info('seed: {s}'.format(s=seed))
+
+ start = time.time()
+
+ while (time.time() - start) < duration:
+ (op, sop) = choose_random_op(rnd)
+
+ LOG.info('{o}({s})'.format(o=op, s=sop))
+ op_log = LOG.getChild('{o}({s})'.format(o=op, s=sop))
+
+ if op == 'put':
+ via_file = (rnd.uniform(0, 100) < 50.0)
+
+ expected = 0
+ cmd = ['put']
+ key = None
+
+ if sop == 'existing':
+ if len(CONFIG_EXISTING) == 0:
+ op_log.debug('no existing keys; continue')
+ continue
+ key = rnd.choice(CONFIG_PUT)
+ assert key in CONFIG_EXISTING, \
+ "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
+
+ expected = 0 # the store just overwrites the value if the key exists
+ # end if sop == 'existing'
+ elif sop == 'new':
+ for x in range(0, 10):
+ key = gen_key(rnd)
+ if key not in CONFIG_EXISTING:
+ break
+ key = None
+ if key is None:
+ op_log.error('unable to generate a unique key -- try again later.')
+ continue
+
+ assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
+ 'key {k} was not supposed to exist!'.format(k=key)
+
+ assert key is not None, \
+ 'key must be != None'
+
+ cmd += [key]
+
+ (size, error) = rnd.choice(SIZES)
+ if size > 25:
+ via_file = True
+
+ data = gen_data(size, rnd)
+
+ if error == 0: # only add if we expect the put to be successful
+ if sop == 'new':
+ CONFIG_PUT.append(key)
+ CONFIG_EXISTING[key] = size
+ expected = error
+
+ if via_file:
+ data_file = write_data_file(data, rnd)
+ cmd += ['-i', data_file]
+ else:
+ cmd += [data]
+
+ op_log.debug('size: {sz}, via: {v}'.format(
+ sz=size,
+ v='file: {f}'.format(f=data_file) if via_file else 'cli')
+ )
+ run_cmd(cmd, expects=expected)
+ if via_file:
+ destroy_tmp_file(data_file)
+ continue
+
+ elif op == 'del':
+ expected = 0
+ cmd = ['del']
+ key = None
+
+ if sop == 'existing':
+ if len(CONFIG_EXISTING) == 0:
+ op_log.debug('no existing keys; continue')
+ continue
+ key = rnd.choice(CONFIG_PUT)
+ assert key in CONFIG_EXISTING, \
+ "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
+
+ if sop == 'enoent':
+ for x in range(0, 10):
+ key = base64.b64encode(os.urandom(20)).decode()
+ if key not in CONFIG_EXISTING:
+ break
+ key = None
+ if key is None:
+ op_log.error('unable to generate a unique key -- try again later.')
+ continue
+ assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
+ 'key {k} was not supposed to exist!'.format(k=key)
+ expected = 0 # deleting a non-existent key succeeds
+
+ assert key is not None, \
+ 'key must be != None'
+
+ cmd += [key]
+ op_log.debug('key: {k}'.format(k=key))
+ run_cmd(cmd, expects=expected)
+ if sop == 'existing':
+ CONFIG_DEL.append(key)
+ CONFIG_PUT.remove(key)
+ del CONFIG_EXISTING[key]
+ continue
+
+ elif op == 'exists':
+ expected = 0
+ cmd = ['exists']
+ key = None
+
+ if sop == 'existing':
+ if len(CONFIG_EXISTING) == 0:
+ op_log.debug('no existing keys; continue')
+ continue
+ key = rnd.choice(CONFIG_PUT)
+ assert key in CONFIG_EXISTING, \
+ "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
+
+ if sop == 'enoent':
+ for x in range(0, 10):
+ key = base64.b64encode(os.urandom(20)).decode()
+ if key not in CONFIG_EXISTING:
+ break
+ key = None
+ if key is None:
+ op_log.error('unable to generate a unique key -- try again later.')
+ continue
+ assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
+ 'key {k} was not supposed to exist!'.format(k=key)
+ expected = -errno.ENOENT
+
+ assert key is not None, \
+ 'key must be != None'
+
+ cmd += [key]
+ op_log.debug('key: {k}'.format(k=key))
+ run_cmd(cmd, expects=expected)
+ continue
+
+ elif op == 'get':
+ expected = 0
+ cmd = ['get']
+ key = None
+
+ if sop == 'existing':
+ if len(CONFIG_EXISTING) == 0:
+ op_log.debug('no existing keys; continue')
+ continue
+ key = rnd.choice(CONFIG_PUT)
+ assert key in CONFIG_EXISTING, \
+ "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
+
+ if sop == 'enoent':
+ for x in range(0, 10):
+ key = base64.b64encode(os.urandom(20)).decode()
+ if key not in CONFIG_EXISTING:
+ break
+ key = None
+ if key is None:
+ op_log.error('unable to generate a unique key -- try again later.')
+ continue
+ assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
+ 'key {k} was not supposed to exist!'.format(k=key)
+ expected = -errno.ENOENT
+
+ assert key is not None, \
+ 'key must be != None'
+
+ file_path = gen_tmp_file_path(rnd)
+ cmd += [key, '-o', file_path]
+ op_log.debug('key: {k}'.format(k=key))
+ run_cmd(cmd, expects=expected)
+ if sop == 'existing':
+ try:
+ temp_file = open(file_path, 'r+')
+ except IOError as err:
+ if err.errno == errno.ENOENT:
+ assert CONFIG_EXISTING[key] == 0, \
+ "error opening '{fp}': {e}".format(fp=file_path, e=err)
+ continue
+ else:
+ assert False, \
+ 'some error occurred: {e}'.format(e=err)
+ cnt = 0
+ while True:
+ read_data = temp_file.read()
+ if read_data == '':
+ break
+ cnt += len(read_data)
+ assert cnt == CONFIG_EXISTING[key], \
+ "wrong size from store for key '{k}': {sz}, expected {es}".format(
+ k=key, sz=cnt, es=CONFIG_EXISTING[key])
+ destroy_tmp_file(file_path)
+ continue
+
+ elif op == 'list' or op == 'dump':
+ expected = 0
+ cmd = [op]
+ key = None
+
+ if sop == 'existing':
+ if len(CONFIG_EXISTING) == 0:
+ op_log.debug('no existing keys; continue')
+ continue
+ key = rnd.choice(CONFIG_PUT)
+ assert key in CONFIG_EXISTING, \
+ "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
+
+ if sop == 'enoent':
+ for x in range(0, 10):
+ key = base64.b64encode(os.urandom(20)).decode()
+ if key not in CONFIG_EXISTING:
+ break
+ key = None
+ if key is None:
+ op_log.error('unable to generate a unique key -- try again later.')
+ continue
+ assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
+ 'key {k} was not supposed to exist!'.format(k=key)
+
+ assert key is not None, \
+ 'key must be != None'
+
+ file_path = gen_tmp_file_path(rnd)
+ cmd += ['-o', file_path]
+ op_log.debug('key: {k}'.format(k=key))
+ run_cmd(cmd, expects=expected)
+ try:
+ temp_file = open(file_path, 'r+')
+ except IOError as err:
+ if err.errno == errno.ENOENT:
+ assert CONFIG_EXISTING[key] == 0, \
+ "error opening '{fp}': {e}".format(fp=file_path, e=err)
+ continue
+ else:
+ assert False, \
+ 'some error occurred: {e}'.format(e=err)
+ cnt = 0
+ try:
+ read_data = json.load(temp_file)
+ except ValueError:
+ temp_file.seek(0)
+ assert False, "{op} output was not valid JSON:\n{filedata}".format(
+ op=op, filedata=temp_file.readlines())
+
+ if sop == 'existing':
+ assert key in read_data, "key '{k}' not found in list/dump output".format(k=key)
+ if op == 'dump':
+ cnt = len(read_data[key])
+ assert cnt == CONFIG_EXISTING[key], \
+ "wrong size from list for key '{k}': {sz}, expected {es}".format(
+ k=key, sz=cnt, es=CONFIG_EXISTING[key])
+ elif sop == 'enoent':
+ assert key not in read_data, "key '{k}' found in list/dump output".format(k=key)
+ destroy_tmp_file(file_path)
+ continue
+ else:
+ assert False, 'unknown op {o}'.format(o=op)
+
+ # check if all keys in 'CONFIG_PUT' exist and
+ # if all keys on 'CONFIG_DEL' don't.
+ # but first however, remove all keys in CONFIG_PUT that might
+ # be in CONFIG_DEL as well.
+ config_put_set = set(CONFIG_PUT)
+ config_del_set = set(CONFIG_DEL).difference(config_put_set)
+
+ LOG.info('perform sanity checks on store')
+
+ for k in config_put_set:
+ LOG.getChild('check(puts)').debug('key: {k_}'.format(k_=k))
+ run_cmd(['exists', k], expects=0)
+ for k in config_del_set:
+ LOG.getChild('check(dels)').debug('key: {k_}'.format(k_=k))
+ run_cmd(['exists', k], expects=-errno.ENOENT)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/qa/workunits/mon/test_mon_osdmap_prune.sh b/qa/workunits/mon/test_mon_osdmap_prune.sh
new file mode 100755
index 000000000..9cdd72179
--- /dev/null
+++ b/qa/workunits/mon/test_mon_osdmap_prune.sh
@@ -0,0 +1,205 @@
+#!/bin/bash
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+set -x
+
+function wait_for_osdmap_manifest() {
+
+ local what=${1:-"true"}
+
+ local -a delays=($(get_timeout_delays $TIMEOUT .1))
+
+ for ((i=0; i < ${#delays[*]}; ++i)); do
+ has_manifest=$(ceph report | jq 'has("osdmap_manifest")')
+ if [[ "$has_manifest" == "$what" ]]; then
+ return 0
+ fi
+
+ sleep ${delays[$i]}
+ done
+
+ echo "osdmap_manifest never outputted on report"
+ ceph report
+ return 1
+}
+
+function wait_for_trim() {
+
+ local -i epoch=$1
+ local -a delays=($(get_timeout_delays $TIMEOUT .1))
+
+ for ((i=0; i < ${#delays[*]}; ++i)); do
+ fc=$(ceph report | jq '.osdmap_first_committed')
+ if [[ $fc -eq $epoch ]]; then
+ return 0
+ fi
+ sleep ${delays[$i]}
+ done
+
+ echo "never trimmed up to epoch $epoch"
+ ceph report
+ return 1
+}
+
+function test_osdmap() {
+
+ local epoch=$1
+ local ret=0
+
+ tmp_map=$(mktemp)
+ ceph osd getmap $epoch -o $tmp_map || return 1
+ if ! osdmaptool --print $tmp_map | grep "epoch $epoch" ; then
+ echo "ERROR: failed processing osdmap epoch $epoch"
+ ret=1
+ fi
+ rm $tmp_map
+ return $ret
+}
+
+function generate_osdmaps() {
+
+ local -i num=$1
+
+ cmds=( set unset )
+ for ((i=0; i < num; ++i)); do
+ ceph osd ${cmds[$((i%2))]} noup || return 1
+ done
+ return 0
+}
+
+function test_mon_osdmap_prune() {
+
+ create_pool foo 32
+ wait_for_clean || return 1
+
+ ceph config set mon mon_debug_block_osdmap_trim true || return 1
+
+ generate_osdmaps 500 || return 1
+
+ report="$(ceph report)"
+ fc=$(jq '.osdmap_first_committed' <<< $report)
+ lc=$(jq '.osdmap_last_committed' <<< $report)
+
+ [[ $((lc-fc)) -ge 500 ]] || return 1
+
+ wait_for_osdmap_manifest || return 1
+
+ manifest="$(ceph report | jq '.osdmap_manifest')"
+
+ first_pinned=$(jq '.first_pinned' <<< $manifest)
+ last_pinned=$(jq '.last_pinned' <<< $manifest)
+ pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) )
+
+ # validate pinned maps list
+ [[ $first_pinned -eq ${pinned_maps[0]} ]] || return 1
+ [[ $last_pinned -eq ${pinned_maps[-1]} ]] || return 1
+
+ # validate pinned maps range
+ [[ $first_pinned -lt $last_pinned ]] || return 1
+ [[ $last_pinned -lt $lc ]] || return 1
+ [[ $first_pinned -eq $fc ]] || return 1
+
+ # ensure all the maps are available, and work as expected
+ # this can take a while...
+
+ for ((i=$first_pinned; i <= $last_pinned; ++i)); do
+ test_osdmap $i || return 1
+ done
+
+ # update pinned maps state:
+ # the monitor may have pruned & pinned additional maps since we last
+ # assessed state, given it's an iterative process.
+ #
+ manifest="$(ceph report | jq '.osdmap_manifest')"
+ first_pinned=$(jq '.first_pinned' <<< $manifest)
+ last_pinned=$(jq '.last_pinned' <<< $manifest)
+ pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) )
+
+ # test trimming maps
+ #
+ # we're going to perform the following tests:
+ #
+ # 1. force trim to a pinned map
+ # 2. force trim to a pinned map's previous epoch
+ # 3. trim all maps except the last 200 or so.
+ #
+
+ # 1. force trim to a pinned map
+ #
+ [[ ${#pinned_maps[@]} -gt 10 ]] || return 1
+
+ trim_to=${pinned_maps[1]}
+ ceph config set mon mon_osd_force_trim_to $trim_to
+ ceph config set mon mon_min_osdmap_epochs 100
+ ceph config set mon paxos_service_trim_min 1
+ ceph config set mon mon_debug_block_osdmap_trim false
+
+ # generate an epoch so we get to trim maps
+ ceph osd set noup
+ ceph osd unset noup
+
+ wait_for_trim $trim_to || return 1
+
+ report="$(ceph report)"
+ fc=$(jq '.osdmap_first_committed' <<< $report)
+ [[ $fc -eq $trim_to ]] || return 1
+
+ old_first_pinned=$first_pinned
+ old_last_pinned=$last_pinned
+ first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report)
+ last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report)
+ [[ $first_pinned -eq $trim_to ]] || return 1
+ [[ $first_pinned -gt $old_first_pinned ]] || return 1
+ [[ $last_pinned -gt $old_first_pinned ]] || return 1
+
+ test_osdmap $trim_to || return 1
+ test_osdmap $(( trim_to+1 )) || return 1
+
+ pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) )
+
+ # 2. force trim to a pinned map's previous epoch
+ #
+ [[ ${#pinned_maps[@]} -gt 2 ]] || return 1
+ trim_to=$(( ${pinned_maps[1]} - 1))
+ ceph config set mon mon_osd_force_trim_to $trim_to
+
+ # generate an epoch so we get to trim maps
+ ceph osd set noup
+ ceph osd unset noup
+
+ wait_for_trim $trim_to || return 1
+
+ report="$(ceph report)"
+ fc=$(jq '.osdmap_first_committed' <<< $report)
+ [[ $fc -eq $trim_to ]] || return 1
+
+ old_first_pinned=$first_pinned
+ old_last_pinned=$last_pinned
+ first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report)
+ last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report)
+ pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) )
+ [[ $first_pinned -eq $trim_to ]] || return 1
+ [[ ${pinned_maps[1]} -eq $(( trim_to+1)) ]] || return 1
+
+ test_osdmap $first_pinned || return 1
+ test_osdmap $(( first_pinned + 1 )) || return 1
+
+ # 3. trim everything
+ #
+ ceph config set mon mon_osd_force_trim_to 0
+
+ # generate an epoch so we get to trim maps
+ ceph osd set noup
+ ceph osd unset noup
+
+ wait_for_osdmap_manifest "false" || return 1
+
+ return 0
+}
+
+test_mon_osdmap_prune || exit 1
+
+echo "OK"
diff --git a/qa/workunits/mon/test_noautoscale_flag.sh b/qa/workunits/mon/test_noautoscale_flag.sh
new file mode 100755
index 000000000..e1a45a4d8
--- /dev/null
+++ b/qa/workunits/mon/test_noautoscale_flag.sh
@@ -0,0 +1,104 @@
+#!/bin/bash -ex
+
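+# CEPH_CLI_TEST_DUP_COMMAND is assumed to make the ceph CLI issue every
+# command twice (idempotency testing); that would double the flag toggles
+# and pool operations below, so make sure it is unset.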
+unset CEPH_CLI_TEST_DUP_COMMAND
+
+NUM_POOLS=$(ceph osd pool ls | wc -l)
+
+if [ "$NUM_POOLS" -gt 0 ]; then
+ echo "test requires no preexisting pools"
+ exit 1
+fi
+
+ceph osd pool set noautoscale
+
+ceph osd pool create pool_a
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of Pools with AUTOSCALE `off`
+
+RESULT1=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
+
+# the number of pools with AUTOSCALE `off` should equal $NUM_POOLS
+
+test "$RESULT1" -eq "$NUM_POOLS"
+
+ceph osd pool unset noautoscale
+
+ceph osd pool get noautoscale
+
+
+ceph osd pool create pool_b
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
+
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of Pools with AUTOSCALE `on`
+
+RESULT2=$(ceph osd pool autoscale-status | grep -oe 'on' | wc -l)
+
+# the number of pools with AUTOSCALE `on` should equal $((NUM_POOLS-1))
+# (pool_a was created while the noautoscale flag was set, so it stays off)
+
+test "$RESULT2" -eq "$[NUM_POOLS-1]"
+
+ceph osd pool set noautoscale
+
+ceph osd pool create pool_c
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
+
+echo 'pool_c autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_c | grep -o -m 1 'on\|off')
+
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of Pools with AUTOSCALE `off`
+
+RESULT3=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
+
+# the number of pools with AUTOSCALE `off` should equal $NUM_POOLS
+
+test "$RESULT3" -eq "$NUM_POOLS"
+
+# Now we test if we retain individual pool state of autoscale mode
+# when we set and unset the noautoscale flag.
+
+ceph osd pool unset noautoscale
+
+ceph osd pool set pool_a pg_autoscale_mode on
+
+ceph osd pool set pool_b pg_autoscale_mode warn
+
+ceph osd pool set noautoscale
+
+ceph osd pool unset noautoscale
+
+RESULT4=$(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off\|warn')
+RESULT5=$(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off\|warn')
+RESULT6=$(ceph osd pool autoscale-status | grep pool_c | grep -o -m 1 'on\|off\|warn')
+
+test "$RESULT4" == 'on'
+test "$RESULT5" == 'warn'
+test "$RESULT6" == 'off'
+
+ceph osd pool rm pool_a pool_a --yes-i-really-really-mean-it
+
+ceph osd pool rm pool_b pool_b --yes-i-really-really-mean-it
+
+ceph osd pool rm pool_c pool_c --yes-i-really-really-mean-it
+
+echo OK
diff --git a/qa/workunits/objectstore/test_fuse.sh b/qa/workunits/objectstore/test_fuse.sh
new file mode 100755
index 000000000..f1dcbd04f
--- /dev/null
+++ b/qa/workunits/objectstore/test_fuse.sh
@@ -0,0 +1,129 @@
+#!/bin/sh -ex
+
+if ! id -u | grep -q '^0$'; then
+ echo "not root, re-running self via sudo"
+ sudo PATH=$PATH TYPE=$TYPE $0
+ exit 0
+fi
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+COT=ceph-objectstore-tool
+DATA=store_test_fuse_dir
+[ -z "$TYPE" ] && TYPE=bluestore
+MNT=store_test_fuse_mnt
+
+rm -rf $DATA
+mkdir -p $DATA
+
+test -d $MNT && fusermount -u $MNT || true
+rmdir $MNT || true
+mkdir $MNT
+
+export CEPH_ARGS=--enable_experimental_unrecoverable_data_corrupting_features=bluestore
+
+$COT --no-mon-config --op mkfs --data-path $DATA --type $TYPE
+$COT --no-mon-config --op fuse --data-path $DATA --mountpoint $MNT &
+
+while ! test -e $MNT/type ; do
+ echo waiting for $MNT/type to appear
+ sleep 1
+done
+
+umask 0
+
+grep $TYPE $MNT/type
+
+# create collection
+mkdir $MNT/meta
+test -e $MNT/meta/bitwise_hash_start
+test -d $MNT/meta/all
+test -d $MNT/meta/by_bitwise_hash
+
+# create object
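+# the directory name below is assumed to encode the ghobject as
+# #pool:bitwise-hash:key:namespace:name:snapid# -- here pool -1 (the meta
+# collection), hash 7b3f43c4, name osd_superblock, snapid 0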
+mkdir $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#
+test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr
+test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap
+test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/bitwise_hash
+test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
+
+# omap header
+echo omap header > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
+grep -q omap $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
+
+# omap
+echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
+echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
+ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap | grep -c key | grep -q 2
+grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
+grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
+rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
+test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
+rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
+test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
+
+# attr
+echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
+echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
+ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr | grep -c key | grep -q 2
+grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
+grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
+rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
+test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
+rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
+test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
+
+# data
+test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+echo asdfasdfasdf > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+test -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+truncate --size 4 $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+stat --format=%s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data | grep -q ^4$
+expect_false grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
+
+
+# create pg collection
+mkdir --mode 0003 $MNT/0.0_head
+grep -q 00000000 $MNT/0.0_head/bitwise_hash_start
+if [ "$TYPE" = "bluestore" ]; then
+ cat $MNT/0.0_head/bitwise_hash_bits
+ grep -q 3 $MNT/0.0_head/bitwise_hash_bits
+ grep -q 1fffffff $MNT/0.0_head/bitwise_hash_end
+fi
+test -d $MNT/0.0_head/all
+
+mkdir --mode 0003 $MNT/0.1_head
+grep -q 80000000 $MNT/0.1_head/bitwise_hash_start
+if [ "$TYPE" = "bluestore" ]; then
+ grep -q 3 $MNT/0.1_head/bitwise_hash_bits
+ grep -q 9fffffff $MNT/0.1_head/bitwise_hash_end
+fi
+
+# create pg object
+mkdir $MNT/0.0_head/all/#0:00000000::::head#/
+mkdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
+
+# verify pg bounds check
+if [ "$TYPE" = "bluestore" ]; then
+ expect_false mkdir $MNT/0.0_head/all/#0:20000000:::bar:head#/
+fi
+
+# remove a collection
+expect_false rmdir $MNT/0.0_head
+rmdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
+rmdir $MNT/0.0_head/all/#0:00000000::::head#/
+rmdir $MNT/0.0_head
+rmdir $MNT/0.1_head
+
+fusermount -u $MNT
+wait
+
+echo OK
diff --git a/qa/workunits/osdc/stress_objectcacher.sh b/qa/workunits/osdc/stress_objectcacher.sh
new file mode 100755
index 000000000..67baadc33
--- /dev/null
+++ b/qa/workunits/osdc/stress_objectcacher.sh
@@ -0,0 +1,28 @@
+#!/bin/sh -ex
+
+for i in $(seq 1 10)
+do
+ for DELAY in 0 1000
+ do
+ for OPS in 1000 10000
+ do
+ for OBJECTS in 10 50 100
+ do
+ for READS in 0.90 0.50 0.10
+ do
+ for OP_SIZE in 4096 131072 1048576
+ do
+ for MAX_DIRTY in 0 25165824
+ do
+ ceph_test_objectcacher_stress --ops $OPS --percent-read $READS --delay-ns $DELAY --objects $OBJECTS --max-op-size $OP_SIZE --client-oc-max-dirty $MAX_DIRTY --stress-test > /dev/null 2>&1
+ done
+ done
+ done
+ done
+ done
+ done
+done
+
+ceph_test_objectcacher_stress --correctness-test > /dev/null 2>&1
+
+echo OK
diff --git a/qa/workunits/post-file.sh b/qa/workunits/post-file.sh
new file mode 100755
index 000000000..120fb2634
--- /dev/null
+++ b/qa/workunits/post-file.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -ex
+
+what="$1"
+[ -z "$what" ] && what=/etc/udev/rules.d
+sudo ceph-post-file -d ceph-test-workunit $what
+
+echo OK
diff --git a/qa/workunits/rados/clone.sh b/qa/workunits/rados/clone.sh
new file mode 100755
index 000000000..281e89f71
--- /dev/null
+++ b/qa/workunits/rados/clone.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+set -e
+
+rados -p data rm foo || true
+rados -p data put foo.tmp /etc/passwd --object-locator foo
+rados -p data clonedata foo.tmp foo --object-locator foo
+rados -p data get foo /tmp/foo
+cmp /tmp/foo /etc/passwd
+rados -p data rm foo.tmp --object-locator foo
+rados -p data rm foo
+
+echo OK \ No newline at end of file
diff --git a/qa/workunits/rados/load-gen-big.sh b/qa/workunits/rados/load-gen-big.sh
new file mode 100755
index 000000000..6715658ec
--- /dev/null
+++ b/qa/workunits/rados/load-gen-big.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 10240 \
+ --min-object-size 1048576 \
+ --max-object-size 25600000 \
+ --max-ops 1024 \
+ --max-backlog 1024 \
+ --read-percent 50 \
+ --run-length 1200
diff --git a/qa/workunits/rados/load-gen-mix-small-long.sh b/qa/workunits/rados/load-gen-mix-small-long.sh
new file mode 100755
index 000000000..593bad51d
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix-small-long.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 1024 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 1800
diff --git a/qa/workunits/rados/load-gen-mix-small.sh b/qa/workunits/rados/load-gen-mix-small.sh
new file mode 100755
index 000000000..02db77bd0
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix-small.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 1024 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 600
diff --git a/qa/workunits/rados/load-gen-mix.sh b/qa/workunits/rados/load-gen-mix.sh
new file mode 100755
index 000000000..ad3b4be84
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 10240 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 600
diff --git a/qa/workunits/rados/load-gen-mostlyread.sh b/qa/workunits/rados/load-gen-mostlyread.sh
new file mode 100755
index 000000000..236f82dd4
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mostlyread.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 51200 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 90 \
+ --run-length 600
diff --git a/qa/workunits/rados/stress_watch.sh b/qa/workunits/rados/stress_watch.sh
new file mode 100755
index 000000000..49f144bbc
--- /dev/null
+++ b/qa/workunits/rados/stress_watch.sh
@@ -0,0 +1,7 @@
+#!/bin/sh -e
+
+ceph_test_stress_watch
+ceph_multi_stress_watch rep reppool repobj
+ceph_multi_stress_watch ec ecpool ecobj
+
+exit 0
diff --git a/qa/workunits/rados/test.sh b/qa/workunits/rados/test.sh
new file mode 100755
index 000000000..daa25fe4d
--- /dev/null
+++ b/qa/workunits/rados/test.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+set -ex
+
+parallel=1
+[ "$1" = "--serial" ] && parallel=0
+
+color=""
+[ -t 1 ] && color="--gtest_color=yes"
+
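+# on exit or interrupt, reap any test children still running; pkill -P $$
+# targets direct children of this shell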
+function cleanup() {
+ pkill -P $$ || true
+}
+trap cleanup EXIT ERR HUP INT QUIT
+
+declare -A pids
+
+for f in \
+ api_aio api_aio_pp \
+ api_io api_io_pp \
+ api_asio api_list \
+ api_lock api_lock_pp \
+ api_misc api_misc_pp \
+ api_tier_pp \
+ api_pool \
+ api_snapshots api_snapshots_pp \
+ api_stat api_stat_pp \
+ api_watch_notify api_watch_notify_pp \
+ api_cmd api_cmd_pp \
+ api_service api_service_pp \
+ api_c_write_operations \
+ api_c_read_operations \
+ api_cls_remote_reads \
+ list_parallel \
+ open_pools_parallel \
+ delete_pools_parallel
+do
+ if [ $parallel -eq 1 ]; then
+ r=`printf '%25s' $f`
+ ff=`echo $f | awk '{print $1}'`
+ bash -o pipefail -exc "ceph_test_rados_$f $color 2>&1 | tee ceph_test_rados_$ff.log | sed \"s/^/$r: /\"" &
+ pid=$!
+ echo "test $f on pid $pid"
+ pids[$f]=$pid
+ else
+ ceph_test_rados_$f
+ fi
+done
+
+ret=0
+if [ $parallel -eq 1 ]; then
+for t in "${!pids[@]}"
+do
+ pid=${pids[$t]}
+ if ! wait $pid
+ then
+ echo "error in $t ($pid)"
+ ret=1
+ fi
+done
+fi
+
+exit $ret
diff --git a/qa/workunits/rados/test_alloc_hint.sh b/qa/workunits/rados/test_alloc_hint.sh
new file mode 100755
index 000000000..535201ca3
--- /dev/null
+++ b/qa/workunits/rados/test_alloc_hint.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+
+set -ex
+shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq()
+
+#
+# Helpers
+#
+
+function get_xml_val() {
+ local xml="$1"
+ local tag="$2"
+
+ local regex=".*<${tag}>(.*)</${tag}>.*"
+ if [[ ! "${xml}" =~ ${regex} ]]; then
+ echo "'${xml}' xml doesn't match '${tag}' tag regex" >&2
+ return 2
+ fi
+
+ echo "${BASH_REMATCH[1]}"
+}
+
+function get_conf_val() {
+ set -e
+
+ local entity="$1"
+ local option="$2"
+
+ local val
+ val="$(sudo ceph daemon "${entity}" config get --format=xml "${option}")"
+ val="$(get_xml_val "${val}" "${option}")"
+
+ echo "${val}"
+}
+
+function setup_osd_data() {
+ for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
+ OSD_DATA[i]="$(get_conf_val "osd.$i" "osd_data")"
+ done
+}
+
+function setup_pgid() {
+ local poolname="$1"
+ local objname="$2"
+
+ local pgid
+ pgid="$(ceph osd map "${poolname}" "${objname}" --format=xml)"
+ pgid="$(get_xml_val "${pgid}" "pgid")"
+
+ PGID="${pgid}"
+}
+
+function expect_alloc_hint_eq() {
+ export CEPH_ARGS="--osd-objectstore=filestore"
+ local expected_extsize="$1"
+
+ for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
+ # Make sure that stuff is flushed from the journal to the store
+ # by the time we get to it, as we prod the actual files and not
+ # the journal.
+ sudo ceph daemon "osd.${i}" "flush_journal"
+
+ # e.g., .../25.6_head/foo__head_7FC1F406__19
+ # .../26.bs1_head/bar__head_EFE6384B__1a_ffffffffffffffff_1
+ local fns=$(sudo sh -c "ls ${OSD_DATA[i]}/current/${PGID}*_head/${OBJ}_*")
+ local count="${#fns[@]}"
+ if [ "${count}" -ne 1 ]; then
+ echo "bad fns count: ${count}" >&2
+ return 2
+ fi
+
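+ # xfs_io -c extsize prints something like "[1048576] /path/to/file",
+ # so capture the bracketed value with the regex below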
+ local extsize
+ extsize="$(sudo xfs_io -c extsize "${fns[0]}")"
+ local extsize_regex="^\[(.*)\] ${fns[0]}$"
+ if [[ ! "${extsize}" =~ ${extsize_regex} ]]; then
+ echo "extsize doesn't match extsize_regex: ${extsize}" >&2
+ return 2
+ fi
+ extsize="${BASH_REMATCH[1]}"
+
+ if [ "${extsize}" -ne "${expected_extsize}" ]; then
+ echo "FAIL: alloc_hint: actual ${extsize}, expected ${expected_extsize}" >&2
+ return 1
+ fi
+ done
+}
+
+#
+# Global setup
+#
+
+EC_K="2"
+EC_M="1"
+NUM_OSDS="$((EC_K + EC_M))"
+
+NUM_PG="12"
+NUM_PGP="${NUM_PG}"
+
+LOW_CAP="$(get_conf_val "osd.0" "filestore_max_alloc_hint_size")"
+HIGH_CAP="$((LOW_CAP * 10))" # 10M, assuming 1M default cap
+SMALL_HINT="$((LOW_CAP / 4))" # 256K, assuming 1M default cap
+BIG_HINT="$((LOW_CAP * 6))" # 6M, assuming 1M default cap
+
+setup_osd_data
+
+#
+# ReplicatedBackend tests
+#
+
+POOL="alloc_hint-rep"
+ceph osd pool create "${POOL}" "${NUM_PG}"
+ceph osd pool set "${POOL}" size "${NUM_OSDS}" --yes-i-really-mean-it
+ceph osd pool application enable "${POOL}" rados
+
+OBJ="foo"
+setup_pgid "${POOL}" "${OBJ}"
+rados -p "${POOL}" create "${OBJ}"
+
+# Empty object, SMALL_HINT - expect SMALL_HINT
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${SMALL_HINT}"
+
+# Try changing to BIG_HINT (1) - expect LOW_CAP (BIG_HINT > LOW_CAP)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
+expect_alloc_hint_eq "${LOW_CAP}"
+
+# Bump the cap to HIGH_CAP
+ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${HIGH_CAP}"
+
+# Try changing to BIG_HINT (2) - expect BIG_HINT (BIG_HINT < HIGH_CAP)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
+expect_alloc_hint_eq "${BIG_HINT}"
+
+ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${LOW_CAP}"
+
+# Populate object with some data
+rados -p "${POOL}" put "${OBJ}" /etc/passwd
+
+# Try changing back to SMALL_HINT - expect BIG_HINT (non-empty object)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${BIG_HINT}"
+
+OBJ="bar"
+setup_pgid "${POOL}" "${OBJ}"
+
+# Non-existent object, SMALL_HINT - expect SMALL_HINT (object creation)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${SMALL_HINT}"
+
+ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
+
+#
+# ECBackend tests
+#
+
+PROFILE="alloc_hint-ecprofile"
+POOL="alloc_hint-ec"
+ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd
+ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged
+ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}"
+ceph osd pool application enable "${POOL}" rados
+
+OBJ="baz"
+setup_pgid "${POOL}" "${OBJ}"
+rados -p "${POOL}" create "${OBJ}"
+
+# Empty object, SMALL_HINT - expect scaled-down SMALL_HINT
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "$((SMALL_HINT / EC_K))"
+
+ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
+
+#
+# Global teardown
+#
+
+echo "OK"
diff --git a/qa/workunits/rados/test_cache_pool.sh b/qa/workunits/rados/test_cache_pool.sh
new file mode 100755
index 000000000..f4187a98a
--- /dev/null
+++ b/qa/workunits/rados/test_cache_pool.sh
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+
+set -ex
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+# create pools, set up tier relationship
+ceph osd pool create base_pool 2
+ceph osd pool application enable base_pool rados
+ceph osd pool create partial_wrong 2
+ceph osd pool create wrong_cache 2
+ceph osd tier add base_pool partial_wrong
+ceph osd tier add base_pool wrong_cache
+
+# populate base_pool with some data
+echo "foo" > foo.txt
+echo "bar" > bar.txt
+echo "baz" > baz.txt
+rados -p base_pool put fooobj foo.txt
+rados -p base_pool put barobj bar.txt
+# fill in wrong_cache backwards so we can tell we read from it
+rados -p wrong_cache put fooobj bar.txt
+rados -p wrong_cache put barobj foo.txt
+# partial_wrong gets barobj backwards so we can check promote and non-promote
+rados -p partial_wrong put barobj foo.txt
+
+# get the objects back before setting a caching pool
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt bar.txt
+
+# set up redirect and make sure we get backwards results
+ceph osd tier set-overlay base_pool wrong_cache
+ceph osd tier cache-mode wrong_cache writeback
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt bar.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt foo.txt
+
+# switch cache pools and make sure we're doing promote
+ceph osd tier remove-overlay base_pool
+ceph osd tier set-overlay base_pool partial_wrong
+ceph osd tier cache-mode partial_wrong writeback
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt # hurray, it promoted!
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt foo.txt # yep, we read partial_wrong's local object!
+
+# try a nonexistent object and make sure we get an error
+expect_false rados -p base_pool get bazobj tmp.txt
+
+# drop the cache entirely and make sure contents are still the same
+ceph osd tier remove-overlay base_pool
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt bar.txt
+
+# create an empty cache pool and make sure it has objects after reading
+ceph osd pool create empty_cache 2
+
+touch empty.txt
+rados -p empty_cache ls > tmp.txt
+diff -q tmp.txt empty.txt
+
+ceph osd tier add base_pool empty_cache
+ceph osd tier set-overlay base_pool empty_cache
+ceph osd tier cache-mode empty_cache writeback
+rados -p base_pool get fooobj tmp.txt
+rados -p base_pool get barobj tmp.txt
+expect_false rados -p base_pool get bazobj tmp.txt
+
+rados -p empty_cache ls > tmp.txt
+expect_false diff -q tmp.txt empty.txt
+
+# cleanup
+ceph osd tier remove-overlay base_pool
+ceph osd tier remove base_pool wrong_cache
+ceph osd tier remove base_pool partial_wrong
+ceph osd tier remove base_pool empty_cache
+ceph osd pool delete base_pool base_pool --yes-i-really-really-mean-it
+ceph osd pool delete empty_cache empty_cache --yes-i-really-really-mean-it
+ceph osd pool delete wrong_cache wrong_cache --yes-i-really-really-mean-it
+ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it
+
+## set of base, cache
+ceph osd pool create base 8
+ceph osd pool application enable base rados
+ceph osd pool create cache 8
+
+ceph osd tier add base cache
+ceph osd tier cache-mode cache writeback
+ceph osd tier set-overlay base cache
+
+# cache-flush, cache-evict
+rados -p base put foo /etc/passwd
+expect_false rados -p base cache-evict foo
+expect_false rados -p base cache-flush foo
+expect_false rados -p cache cache-evict foo
+rados -p cache cache-flush foo
+rados -p cache cache-evict foo
+rados -p cache ls - | wc -l | grep 0
+
+# cache-try-flush, cache-evict
+rados -p base put foo /etc/passwd
+expect_false rados -p base cache-evict foo
+expect_false rados -p base cache-flush foo
+expect_false rados -p cache cache-evict foo
+rados -p cache cache-try-flush foo
+rados -p cache cache-evict foo
+rados -p cache ls - | wc -l | grep 0
+
+# cache-flush-evict-all
+rados -p base put bar /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+expect_false rados -p base cache-flush-evict-all
+rados -p cache cache-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+# cache-try-flush-evict-all
+rados -p base put bar /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+expect_false rados -p base cache-flush-evict-all
+rados -p cache cache-try-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+# cache flush/evict when clone objects exist
+rados -p base put testclone /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap
+rados -p base put testclone /etc/hosts
+rados -p cache cache-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
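+# proxy mode is assumed to forward reads to the base tier without promoting,
+# so the flushed head and clone must now be served from the base pool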
+ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
+rados -p base -s snap get testclone testclone.txt
+diff -q testclone.txt /etc/passwd
+rados -p base get testclone testclone.txt
+diff -q testclone.txt /etc/hosts
+
+# test --with-clones option
+ceph osd tier cache-mode cache writeback
+rados -p base put testclone2 /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap1
+rados -p base put testclone2 /etc/hosts
+expect_false rados -p cache cache-flush testclone2
+rados -p cache cache-flush testclone2 --with-clones
+expect_false rados -p cache cache-evict testclone2
+rados -p cache cache-evict testclone2 --with-clones
+rados -p cache ls - | wc -l | grep 0
+
+rados -p base -s snap1 get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/passwd
+rados -p base get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/hosts
+
+# cleanup
+ceph osd tier remove-overlay base
+ceph osd tier remove base cache
+
+ceph osd pool delete cache cache --yes-i-really-really-mean-it
+ceph osd pool delete base base --yes-i-really-really-mean-it
+
+echo OK
diff --git a/qa/workunits/rados/test_crash.sh b/qa/workunits/rados/test_crash.sh
new file mode 100755
index 000000000..26a4c9bdc
--- /dev/null
+++ b/qa/workunits/rados/test_crash.sh
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+set -x
+
+# run on a single-node three-OSD cluster
+
+sudo killall -ABRT ceph-osd
+sleep 5
+
+# the kill above caused coredumps; find and delete them, carefully, so as
+# not to disturb other coredumps, or else teuthology will see them and
+# assume test failure. sudo is needed because the core files are owned by
+# root with mode 600.
+for f in $(find $TESTDIR/archive/coredump -type f); do
+ gdb_output=$(echo "quit" | sudo gdb /usr/bin/ceph-osd $f)
+ if expr match "$gdb_output" ".*generated.*ceph-osd.*" && \
+ ( \
+ expr match "$gdb_output" ".*terminated.*signal 6.*" || \
+ expr match "$gdb_output" ".*terminated.*signal SIGABRT.*" \
+ )
+ then
+ sudo rm $f
+ fi
+done
+
+# ceph-crash runs as the unprivileged "ceph" user, but when under test
+# the ceph osd daemons are running as root, so their crash files aren't
+# readable. let's chown them so they behave as they would in real life.
+sudo chown -R ceph:ceph /var/lib/ceph/crash
+
+# let daemon find crashdumps on startup
+sudo systemctl restart ceph-crash
+sleep 30
+
+# there must be 3 crashdumps registered and moved to crash/posted
+[ $(ceph crash ls | wc -l) = 4 ] || exit 1 # 4 because `ceph crash ls` prints a table header line
+[ $(sudo find /var/lib/ceph/crash/posted/ -name meta | wc -l) = 3 ] || exit 1
+
+# there should be a health warning
+ceph health detail | grep RECENT_CRASH || exit 1
+ceph crash archive-all
+sleep 30
+ceph health detail | grep -c RECENT_CRASH | grep 0 # should be gone!
diff --git a/qa/workunits/rados/test_crushdiff.sh b/qa/workunits/rados/test_crushdiff.sh
new file mode 100755
index 000000000..833ecbd0b
--- /dev/null
+++ b/qa/workunits/rados/test_crushdiff.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -ex
+
+REP_POOL=
+EC_POOL=
+TEMPDIR=
+
+OSD_NUM=$(ceph osd ls | wc -l)
+test ${OSD_NUM} -gt 0
+
+setup() {
+ local pool
+
+ TEMPDIR=`mktemp -d`
+
+ pool=test-crushdiff-rep-$$
+ ceph osd pool create ${pool} 32
+ REP_POOL=${pool}
+ rados -p ${REP_POOL} bench 5 write --no-cleanup
+
+ if [ ${OSD_NUM} -gt 3 ]; then
+ pool=test-crushdiff-ec-$$
+ ceph osd pool create ${pool} 32 32 erasure
+ EC_POOL=${pool}
+ rados -p ${EC_POOL} bench 5 write --no-cleanup
+ fi
+}
+
+cleanup() {
+ set +e
+
+ test -n "${EC_POOL}" &&
+ ceph osd pool delete "${EC_POOL}" "${EC_POOL}" \
+ --yes-i-really-really-mean-it
+ EC_POOL=
+
+ test -n "${REP_POOL}" &&
+ ceph osd pool delete "${REP_POOL}" "${REP_POOL}" \
+ --yes-i-really-really-mean-it
+ REP_POOL=
+
+ test -n "${TEMPDIR}" && rm -Rf ${TEMPDIR}
+ TEMPDIR=
+}
+
+trap "cleanup" INT TERM EXIT
+
+setup
+
+# test without crushmap modification
+
+crushdiff export ${TEMPDIR}/cm.txt --verbose
+crushdiff compare ${TEMPDIR}/cm.txt --verbose
+crushdiff import ${TEMPDIR}/cm.txt --verbose
+
+# test using a compiled crushmap
+
+crushdiff export ${TEMPDIR}/cm --compiled --verbose
+crushdiff compare ${TEMPDIR}/cm --compiled --verbose
+crushdiff import ${TEMPDIR}/cm --compiled --verbose
+
+# test using "offline" osdmap and pg-dump
+
+ceph osd getmap -o ${TEMPDIR}/osdmap
+ceph pg dump --format json > ${TEMPDIR}/pg-dump
+
+crushdiff export ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
+crushdiff compare ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap \
+ --pg-dump ${TEMPDIR}/pg-dump --verbose | tee ${TEMPDIR}/compare.txt
+
+# test the diff is zero when the crushmap is not modified
+
+grep '^0/[0-9]* (0\.00%) pgs affected' ${TEMPDIR}/compare.txt
+grep '^0/[0-9]* (0\.00%) objects affected' ${TEMPDIR}/compare.txt
+grep '^0/[0-9]* (0\.00%) pg shards to move' ${TEMPDIR}/compare.txt
+grep '^0/[0-9]* (0\.00%) pg object shards to move' ${TEMPDIR}/compare.txt
+grep '^0\.00/.* (0\.00%) bytes to move' ${TEMPDIR}/compare.txt
+crushdiff import ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
+
+if [ ${OSD_NUM} -gt 3 ]; then
+
+ # test the diff is non-zero when the crushmap is modified
+
+ cat ${TEMPDIR}/cm.txt >&2
+
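+ # triple osd.0's weight in the decompiled crushmap so the compare step
+ # predicts data movement (a non-zero diff)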
+ weight=$(awk '/item osd\.0 weight ([0-9.]+)/ {print $4 * 3}' \
+ ${TEMPDIR}/cm.txt)
+ test -n "${weight}"
+ sed -i -Ee 's/^(.*item osd\.0 weight )[0-9.]+/\1'${weight}'/' \
+ ${TEMPDIR}/cm.txt
+ crushdiff compare ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap \
+ --pg-dump ${TEMPDIR}/pg-dump --verbose | tee ${TEMPDIR}/compare.txt
+ grep '^[1-9][0-9]*/[0-9]* (.*%) pgs affected' ${TEMPDIR}/compare.txt
+ grep '^[1-9][0-9]*/[0-9]* (.*%) objects affected' ${TEMPDIR}/compare.txt
+ grep '^[1-9][0-9]*/[0-9]* (.*%) pg shards to move' ${TEMPDIR}/compare.txt
+ grep '^[1-9][0-9]*/[0-9]* (.*%) pg object shards to move' \
+ ${TEMPDIR}/compare.txt
+ grep '^.*/.* (.*%) bytes to move' ${TEMPDIR}/compare.txt
+ crushdiff import ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
+fi
+
+echo OK
diff --git a/qa/workunits/rados/test_dedup_tool.sh b/qa/workunits/rados/test_dedup_tool.sh
new file mode 100755
index 000000000..18deb331b
--- /dev/null
+++ b/qa/workunits/rados/test_dedup_tool.sh
@@ -0,0 +1,458 @@
+#!/usr/bin/env bash
+
+set -x
+
+die() {
+ echo "$@"
+ exit 1
+}
+
+do_run() {
+ if [ "$1" == "--tee" ]; then
+ shift
+ tee_out="$1"
+ shift
+ "$@" | tee $tee_out
+ else
+ "$@"
+ fi
+}
+
+run_expect_succ() {
+ echo "RUN_EXPECT_SUCC: " "$@"
+ do_run "$@"
+ [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
+}
+
+run() {
+ echo "RUN: " $@
+ do_run "$@"
+}
+
+if [ -n "$CEPH_BIN" ] ; then
+ # CMake env
+ RADOS_TOOL="$CEPH_BIN/rados"
+ CEPH_TOOL="$CEPH_BIN/ceph"
+ DEDUP_TOOL="$CEPH_BIN/ceph-dedup-tool"
+else
+ # executables should be installed by the QA env
+ RADOS_TOOL=$(which rados)
+ CEPH_TOOL=$(which ceph)
+ DEDUP_TOOL=$(which ceph-dedup-tool)
+fi
+
+POOL=dedup_pool
+OBJ=test_rados_obj
+
+[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
+[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
+
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+sleep 5
+
+function test_dedup_ratio_fixed()
+{
+ # case 1
+ dd if=/dev/urandom of=dedup_object_1k bs=1K count=1
+ for num in `seq 1 50`
+ do
+ dd if=dedup_object_1k of=dedup_object_100k bs=1K oflag=append conv=notrunc
+ done
+ for num in `seq 1 50`
+ do
+ dd if=/dev/zero of=dedup_object_100k bs=1K count=1 oflag=append conv=notrunc
+ done
+
+ $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_100k
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 1024 --chunk-algorithm fixed --fingerprint-algorithm sha1 | grep chunk_size_average | awk '{print$2}' | sed "s/\,//g")
+ # total size / the number of deduped object = 100K / 1
+ if [ 51200 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 51200 result $RESULT"
+ fi
+
+ # case 2
+ dd if=/dev/zero of=dedup_object_10m bs=10M count=1
+
+ $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_10m
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 | grep examined_bytes | awk '{print$2}')
+ # 10485760
+ if [ 10485760 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 10485760 result $RESULT"
+ fi
+
+ # case 3 max_thread
+ for num in `seq 0 20`
+ do
+ dd if=/dev/zero of=dedup_object_$num bs=4M count=1
+ $RADOS_TOOL -p $POOL put dedup_object_$num ./dedup_object_$num
+ done
+
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --max-thread 4 | grep chunk_size_average | awk '{print$2}' | sed "s/\,//g")
+
+ if [ 98566144 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 98566144 result $RESULT"
+ fi
+
+ rm -rf ./dedup_object_1k ./dedup_object_100k ./dedup_object_10m
+ for num in `seq 0 20`
+ do
+ rm -rf ./dedup_object_$num
+ done
+ $RADOS_TOOL -p $POOL rm $OBJ
+ for num in `seq 0 20`
+ do
+ $RADOS_TOOL -p $POOL rm dedup_object_$num
+ done
+}
+
+function test_dedup_chunk_scrub()
+{
+
+ CHUNK_POOL=dedup_chunk_pool
+ run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
+
+ echo "hi there" > foo
+
+ echo "hi there" > bar
+
+ echo "there" > foo-chunk
+
+ echo "CHUNK" > bar-chunk
+
+ $CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_chunk_algorithm fastcdc --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_cdc_chunk_size 4096 --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
+
+ $RADOS_TOOL -p $POOL put foo ./foo
+ $RADOS_TOOL -p $POOL put bar ./bar
+
+ $RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk
+ $RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk
+
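+ # set-chunk is assumed to map bytes [0, 8) of bar to offset 0 of bar-chunk
+ # in the chunk pool; --with-reference also records a back-reference on the chunk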
+ $RADOS_TOOL -p $POOL set-chunk bar 0 8 --target-pool $CHUNK_POOL bar-chunk 0 --with-reference
+
+ echo -n "There hi" > test_obj
+ # dirty
+ $RADOS_TOOL -p $POOL put foo ./test_obj
+ $RADOS_TOOL -p $POOL set-chunk foo 0 8 --target-pool $CHUNK_POOL foo-chunk 0 --with-reference
+ # flush
+ $RADOS_TOOL -p $POOL tier-flush foo
+ sleep 2
+
+ $RADOS_TOOL ls -p $CHUNK_POOL
+ CHUNK_OID=$(echo -n "There hi" | sha1sum | awk '{print $1}')
+
+ POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref bar --target-ref-pool-id $POOL_ID
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID)
+
+ RESULT=$($DEDUP_TOOL --op chunk-scrub --chunk-pool $CHUNK_POOL | grep "Damaged object" | awk '{print$4}')
+ if [ $RESULT -ne "1" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Chunk-scrub failed expecting damaged objects is not 1"
+ fi
+
+ $DEDUP_TOOL --op chunk-put-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref bar --target-ref-pool-id $POOL_ID
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -n "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+
+ rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj
+ $RADOS_TOOL -p $POOL rm foo
+ $RADOS_TOOL -p $POOL rm bar
+}
+
+function test_dedup_chunk_repair()
+{
+
+ CHUNK_POOL=dedup_chunk_pool
+ run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
+
+ echo -n "hi there" > foo
+
+ echo -n "hi there" > bar
+
+ echo -n "there" > foo-chunk
+
+ echo -n "CHUNK" > bar-chunk
+
+ $CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_chunk_algorithm fastcdc --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_cdc_chunk_size 4096 --yes-i-really-mean-it
+ $CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
+
+ $RADOS_TOOL -p $POOL put foo ./foo
+ $RADOS_TOOL -p $POOL put bar ./bar
+
+ $RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk
+ $RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk
+
+ $RADOS_TOOL ls -p $CHUNK_POOL
+ CHUNK_OID=$(echo -n "hi there" | sha1sum | awk '{print $1}')
+
+ POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
+ $RADOS_TOOL -p $CHUNK_POOL put $CHUNK_OID ./foo
+
+ # inflate the ref counts with spurious get-refs so they mismatch the real references
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
+
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID)
+ RESULT=$($DEDUP_TOOL --op chunk-scrub --chunk-pool $CHUNK_POOL | grep "Damaged object" | awk '{print$4}')
+ if [ $RESULT -ne "2" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Chunk-scrub failed expecting damaged objects is not 1"
+ fi
+
+ $DEDUP_TOOL --op chunk-repair --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
+ $DEDUP_TOOL --op chunk-repair --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
+
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep foo | wc -l)
+ if [ 0 -ne "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object bar-chunk | grep bar | wc -l)
+ if [ 0 -ne "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+
+ rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj
+ $RADOS_TOOL -p $POOL rm foo
+ $RADOS_TOOL -p $POOL rm bar
+}
+
+function test_dedup_object()
+{
+
+ CHUNK_POOL=dedup_chunk_pool
+ run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
+
+ echo "There hiHI" > foo
+
+ $CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
+ $RADOS_TOOL -p $POOL put foo ./foo
+
+ sleep 2
+
+ $RADOS_TOOL ls -p $CHUNK_POOL
+
+ RESULT=$($DEDUP_TOOL --pool $POOL --op chunk-dedup --object foo --chunk-pool $CHUNK_POOL --source-off 0 --source-length 10 --fingerprint-algorithm sha1 )
+
+ POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
+ CHUNK_OID=$(echo -n "There hiHI" | sha1sum | awk '{print $1}')
+
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep foo)
+
+ if [ -z "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $RADOS_TOOL -p $CHUNK_POOL get $CHUNK_OID ./chunk
+ VERIFY=$(cat ./chunk | sha1sum | awk '{print $1}')
+ if [ "$CHUNK_OID" != "$VERIFY" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Comparing failed expecting chunk mismatch"
+ fi
+
+ echo -n "There hihiHI" > bar
+
+ $RADOS_TOOL -p $POOL put bar ./bar
+ RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096)
+
+ CHUNK_OID=$(echo -n "There hihiHI" | sha1sum | awk '{print $1}')
+
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -z "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $RADOS_TOOL -p $CHUNK_POOL get $CHUNK_OID ./chunk
+ VERIFY=$(cat ./chunk | sha1sum | awk '{print $1}')
+ if [ "$CHUNK_OID" != "$VERIFY" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Comparing failed expecting chunk mismatch"
+ fi
+
+ echo -n "THERE HIHIHI" > bar
+ $RADOS_TOOL -p $POOL put bar ./bar
+ $RADOS_TOOL -p $POOL mksnap mysnap
+
+ echo -n "There HIHIHI" > bar
+ $RADOS_TOOL -p $POOL put bar ./bar
+
+ RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096 --snap)
+
+ CHUNK_OID=$(echo -n "THERE HIHIHI" | sha1sum | awk '{print $1}')
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -z "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ CHUNK_OID=$(echo -n "There HIHIHI" | sha1sum | awk '{print $1}')
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -z "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+ # rerun tier-flush
+
+ RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096)
+
+ CHUNK_OID=$(echo -n "There HIHIHI" | sha1sum | awk '{print $1}')
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -z "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+
+ rm -rf ./foo ./bar ./chunk
+ $RADOS_TOOL -p $POOL rm foo
+ $RADOS_TOOL -p $POOL rm bar
+}
+
+function test_sample_dedup()
+{
+ CHUNK_POOL=dedup_chunk_pool
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+
+ sleep 2
+
+ run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+ run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
+ run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_tier "$CHUNK_POOL"
+ run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_chunk_algorithm fastcdc
+ run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_cdc_chunk_size 8192
+ run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" fingerprint_algorithm sha1
+
+ # 8 Dedupable objects
+ CONTENT_1="There hiHI"
+ echo $CONTENT_1 > foo
+ for num in `seq 1 8`
+ do
+ $RADOS_TOOL -p $POOL put foo_$num ./foo
+ done
+
+ # 1 Unique object
+ CONTENT_3="There hiHI3"
+ echo $CONTENT_3 > foo3
+ $RADOS_TOOL -p $POOL put foo3_1 ./foo3
+
+ sleep 2
+
+ # Execute dedup crawler
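+ # assumed option semantics: --sampling-ratio 50 makes the crawler examine
+ # roughly half of the objects, and --chunk-dedup-threshold 3 requires a
+ # fingerprint to be seen at least 3 times before its chunk is deduplicated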
+ RESULT=$($DEDUP_TOOL --pool $POOL --chunk-pool $CHUNK_POOL --op sample-dedup --chunk-algorithm fastcdc --fingerprint-algorithm sha1 --chunk-dedup-threshold 3 --sampling-ratio 50)
+
+ CHUNK_OID_1=$(echo $CONTENT_1 | sha1sum | awk '{print $1}')
+ CHUNK_OID_3=$(echo $CONTENT_3 | sha1sum | awk '{print $1}')
+
+ # Find chunk object has references of 8 dedupable meta objects
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_1)
+ DEDUP_COUNT=0
+ for num in `seq 1 8`
+ do
+ GREP_RESULT=$(echo $RESULT | grep foo_$num)
+ if [ -n "$GREP_RESULT" ]; then
+ DEDUP_COUNT=$(($DEDUP_COUNT + 1))
+ fi
+ done
+ if [ $DEDUP_COUNT -lt 2 ]; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Chunk object has no reference of first meta object"
+ fi
+
+ # 7 Duplicated objects but less than chunk dedup threshold
+ CONTENT_2="There hiHI2"
+ echo $CONTENT_2 > foo2
+ for num in `seq 1 7`
+ do
+ $RADOS_TOOL -p $POOL put foo2_$num ./foo2
+ done
+ CHUNK_OID_2=$(echo $CONTENT_2 | sha1sum | awk '{print $1}')
+
+ RESULT=$($DEDUP_TOOL --pool $POOL --chunk-pool $CHUNK_POOL --op sample-dedup --chunk-algorithm fastcdc --fingerprint-algorithm sha1 --sampling-ratio 100 --chunk-dedup-threshold 2)
+
+ # Objects duplicated fewer times than the chunk-dedup threshold should still be
+ # deduplicated, because they satisfy the object-dedup threshold.
+ # The one object crawled first should not be deduplicated, because it was not
+ # yet a duplicate at the time it was examined.
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_2)
+ DEDUP_COUNT=0
+ for num in `seq 1 7`
+ do
+ GREP_RESULT=$(echo $RESULT | grep foo2_$num)
+ if [ -n "$GREP_RESULT" ]; then
+ DEDUP_COUNT=$(($DEDUP_COUNT + 1))
+ fi
+ done
+ if [ $DEDUP_COUNT -ne 6 ]; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Chunk object has no reference of first meta object"
+ fi
+
+ # Unique object should not be deduplicated
+ RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_3)
+ GREP_RESULT=$(echo "$RESULT" | grep $CHUNK_OID_3)
+ if [ -n "$GREP_RESULT" ]; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Chunk object has no reference of second meta object"
+ fi
+
+ rm -rf ./foo ./foo2 ./foo3
+ for num in `seq 1 8`
+ do
+ $RADOS_TOOL -p $POOL rm foo_$num
+ done
+ for num in `seq 1 7`
+ do
+ $RADOS_TOOL -p $POOL rm foo2_$num
+ done
+ $RADOS_TOOL -p $POOL rm foo3_1
+
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+}
+
+test_dedup_ratio_fixed
+test_dedup_chunk_scrub
+test_dedup_chunk_repair
+test_dedup_object
+test_sample_dedup
+
+$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+
+echo "SUCCESS!"
+exit 0
diff --git a/qa/workunits/rados/test_envlibrados_for_rocksdb.sh b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
new file mode 100755
index 000000000..371452f40
--- /dev/null
+++ b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+set -ex
+
+############################################
+# Helper functions
+############################################
+source $(dirname $0)/../ceph-helpers-root.sh
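+# ceph-helpers-root.sh provides the distro_id() and install() helpers used below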
+
+############################################
+# Install required tools
+############################################
+echo "Install required tools"
+
+CURRENT_PATH=`pwd`
+
+############################################
+# Compile&Start RocksDB
+############################################
+# install prerequisites
+# for rocksdb
+case $(distro_id) in
+ ubuntu|debian|devuan|softiron)
+ install git g++ libsnappy-dev zlib1g-dev libbz2-dev libradospp-dev cmake
+ ;;
+ centos|fedora|rhel)
+ case $(distro_id) in
+ rhel)
+ # RHEL needs CRB repo for snappy-devel
+ sudo subscription-manager repos --enable "codeready-builder-for-rhel-8-x86_64-rpms"
+ ;;
+ esac
+ install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64 cmake libarchive-3.3.3
+ ;;
+ opensuse*|suse|sles)
+ install git gcc-c++ snappy-devel zlib-devel libbz2-devel libradospp-devel
+ ;;
+ *)
+ echo "$(distro_id) is unknown, $@ will have to be installed manually."
+ ;;
+esac
+
+# # gflags
+# sudo yum install gflags-devel
+#
+# wget https://github.com/schuhschuh/gflags/archive/master.zip
+# unzip master.zip
+# cd gflags-master
+# mkdir build && cd build
+# export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
+# make && make install
+
+# # snappy-devel
+
+
+echo "Compile rocksdb"
+if [ -e rocksdb ]; then
+ rm -fr rocksdb
+fi
+
+pushd $(dirname $0)/../../../
+git submodule update --init src/rocksdb
+popd
+git clone $(dirname $0)/../../../src/rocksdb rocksdb
+
+# compile code
+cd rocksdb
+if type cmake3 > /dev/null 2>&1 ; then
+ CMAKE=cmake3
+else
+ CMAKE=cmake
+fi
+
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+mkdir ${BUILD_DIR} && cd ${BUILD_DIR} && ${CMAKE} -DCMAKE_BUILD_TYPE=Debug -DWITH_TESTS=ON -DWITH_LIBRADOS=ON -DWITH_SNAPPY=ON -DWITH_GFLAGS=OFF -DFAIL_ON_WARNINGS=OFF ..
+make rocksdb_env_librados_test -j8
+
+echo "Copy ceph.conf"
+# prepare ceph.conf
+mkdir -p ../ceph/src/
+if [ -f "/etc/ceph/ceph.conf" ]; then
+ cp /etc/ceph/ceph.conf ../ceph/src/
+elif [ -f "/etc/ceph/ceph/ceph.conf" ]; then
+ cp /etc/ceph/ceph/ceph.conf ../ceph/src/
+else
+ echo "/etc/ceph/ceph/ceph.conf doesn't exist"
+fi
+
+echo "Run EnvLibrados test"
+# run test
+if [ -f "../ceph/src/ceph.conf" ]
+ then
+ cp env_librados_test ~/cephtest/archive
+ ./env_librados_test
+else
+ echo "../ceph/src/ceph.conf doesn't exist"
+fi
+cd ${CURRENT_PATH}
diff --git a/qa/workunits/rados/test_hang.sh b/qa/workunits/rados/test_hang.sh
new file mode 100755
index 000000000..724e0bb82
--- /dev/null
+++ b/qa/workunits/rados/test_hang.sh
@@ -0,0 +1,8 @@
+#!/bin/sh -ex
+
+# Hang forever for manual testing using the thrasher
+while(true)
+do
+ sleep 300
+done
+exit 0
diff --git a/qa/workunits/rados/test_health_warnings.sh b/qa/workunits/rados/test_health_warnings.sh
new file mode 100755
index 000000000..d393e5c68
--- /dev/null
+++ b/qa/workunits/rados/test_health_warnings.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+set -uex
+
+# number of osds = 10
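+# crushtool --build is assumed to stack buckets bottom-up: 2 osds per host,
+# 2 hosts per rack, 2 racks per row, and all rows under a single root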
+crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
+ceph osd setcrushmap -i crushmap
+ceph osd tree
+ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
+ceph osd set noout
+
+wait_for_healthy() {
+ while ceph health | grep down
+ do
+ sleep 1
+ done
+}
+
+test_mark_two_osds_same_host_down() {
+ ceph osd set noup
+ ceph osd down osd.0 osd.1
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.0"
+ ceph health detail | grep "osd.1"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_rack_down() {
+ ceph osd set noup
+ ceph osd down osd.8 osd.9
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "1 rack"
+ ceph health | grep "1 row"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.8"
+ ceph health detail | grep "osd.9"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_all_but_last_osds_down() {
+ ceph osd set noup
+ ceph osd down $(ceph osd ls | sed \$d)
+ ceph health detail
+ ceph health | grep "1 row"
+ ceph health | grep "2 racks"
+ ceph health | grep "4 hosts"
+ ceph health | grep "9 osds"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_host_down_with_classes() {
+ ceph osd set noup
+ ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
+ ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
+ ceph osd down osd.0 osd.1
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.0"
+ ceph health detail | grep "osd.1"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_host_down
+test_mark_two_osds_same_rack_down
+test_mark_all_but_last_osds_down
+test_mark_two_osds_same_host_down_with_classes
+
+exit 0
diff --git a/qa/workunits/rados/test_large_omap_detection.py b/qa/workunits/rados/test_large_omap_detection.py
new file mode 100755
index 000000000..b5c81a8d8
--- /dev/null
+++ b/qa/workunits/rados/test_large_omap_detection.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+# -*- mode:python -*-
+# vim: ts=4 sw=4 smarttab expandtab
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+import json
+import rados
+import shlex
+import subprocess
+import time
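+
+# Strategy (from the code below): create two objects whose omap key counts
+# (20001 keys, and 20000 large values) exceed the large-omap warning
+# thresholds, which the test environment is assumed to lower; deep-scrub
+# their PGs and then expect the "2 large omap objects" health warning.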
+
+def cleanup(cluster):
+ cluster.delete_pool('large-omap-test-pool')
+ cluster.shutdown()
+
+def init():
+ # For local testing
+ #cluster = rados.Rados(conffile='./ceph.conf')
+ cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
+ cluster.connect()
+ print("\nCluster ID: " + cluster.get_fsid())
+ cluster.create_pool('large-omap-test-pool')
+ ioctx = cluster.open_ioctx('large-omap-test-pool')
+ ioctx.write_full('large-omap-test-object1', b"Lorem ipsum")
+ op = ioctx.create_write_op()
+
+ keys = []
+ values = []
+ for x in range(20001):
+ keys.append(str(x))
+ values.append(b"X")
+
+ ioctx.set_omap(op, tuple(keys), tuple(values))
+ ioctx.operate_write_op(op, 'large-omap-test-object1', 0)
+ ioctx.release_write_op(op)
+
+ ioctx.write_full('large-omap-test-object2', b"Lorem ipsum dolor")
+ op = ioctx.create_write_op()
+
+ buffer = ("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
+ "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
+ "enim ad minim veniam, quis nostrud exercitation ullamco laboris "
+ "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in "
+ "reprehenderit in voluptate velit esse cillum dolore eu fugiat "
+ "nulla pariatur. Excepteur sint occaecat cupidatat non proident, "
+ "sunt in culpa qui officia deserunt mollit anim id est laborum.")
+
+ keys = []
+ values = []
+ for x in range(20000):
+ keys.append(str(x))
+ values.append(buffer.encode())
+
+ ioctx.set_omap(op, tuple(keys), tuple(values))
+ ioctx.operate_write_op(op, 'large-omap-test-object2', 0)
+ ioctx.release_write_op(op)
+ ioctx.close()
+ return cluster
+
+def get_deep_scrub_timestamp(pgid):
+ cmd = ['ceph', 'pg', 'dump', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ try:
+ pgstats = json.loads(out)['pg_map']['pg_stats']
+ except KeyError:
+ pgstats = json.loads(out)['pg_stats']
+ for stat in pgstats:
+ if stat['pgid'] == pgid:
+ return stat['last_deep_scrub_stamp']
+
+def wait_for_scrub():
+    osds = set()
+    pgs = dict()
+ cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool',
+ 'large-omap-test-object1', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ osds.add(json.loads(out)['acting_primary'])
+ pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid'])
+ cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool',
+ 'large-omap-test-object2', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ osds.add(json.loads(out)['acting_primary'])
+ pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid'])
+
+ for pg in pgs:
+ command = "ceph pg deep-scrub " + str(pg)
+ subprocess.check_call(shlex.split(command))
+
+ for pg in pgs:
+ RETRIES = 0
+ while RETRIES < 60 and pgs[pg] == get_deep_scrub_timestamp(pg):
+ time.sleep(10)
+ RETRIES += 1
+
+def check_health_output():
+ RETRIES = 0
+ result = 0
+ while RETRIES < 6 and result != 2:
+ result = 0
+ RETRIES += 1
+ output = subprocess.check_output(["ceph", "health", "detail"])
+ for line in output.splitlines():
+ result += int(line.find(b'2 large omap objects') != -1)
+ time.sleep(10)
+
+ if result != 2:
+ print("Error, got invalid output:")
+ print(output)
+        raise Exception("expected '2 large omap objects' in ceph health detail")
+
+def main():
+ cluster = init()
+ wait_for_scrub()
+ check_health_output()
+
+ cleanup(cluster)
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/rados/test_libcephsqlite.sh b/qa/workunits/rados/test_libcephsqlite.sh
new file mode 100755
index 000000000..1810a3f3f
--- /dev/null
+++ b/qa/workunits/rados/test_libcephsqlite.sh
@@ -0,0 +1,136 @@
+#!/bin/bash -ex
+
+# The main point of these tests beyond ceph_test_libcephsqlite is to:
+#
+# - Ensure you can load the Ceph VFS via the dynamic load extension mechanism
+# in SQLite.
+# - Check that a dead application does not hold locks indefinitely.
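+#
+# A minimal manual session, for reference (illustrative only; it assumes
+# libcephsqlite.so is on the library path and <pool> exists), would be:
+#
+#   sqlite3 -cmd '.load libcephsqlite.so' \
+#           -cmd '.open file:///<pool>:<ns>/example.db?vfs=ceph' <<< 'SELECT 1;'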
+
+pool="$1"
+ns="$(basename $0)"
+
+function sqlite {
+ background="$1"
+ if [ "$background" = b ]; then
+ shift
+ fi
+ a=$(cat)
+ printf "%s" "$a" >&2
+ # We're doing job control gymnastics here to make sure that sqlite3 is the
+ # main process (i.e. the process group leader) in the background, not a bash
+ # function or job pipeline.
+ sqlite3 -cmd '.output /dev/null' -cmd '.load libcephsqlite.so' -cmd 'pragma journal_mode = PERSIST' -cmd ".open file:///$pool:$ns/baz.db?vfs=ceph" -cmd '.output stdout' <<<"$a" &
+ if [ "$background" != b ]; then
+ wait
+ fi
+}
+
+function striper {
+ rados --pool=$pool --namespace="$ns" --striper "$@"
+}
+
+function repeat {
+ n=$1
+ shift
+ for ((i = 0; i < "$n"; ++i)); do
+ echo "$*"
+ done
+}
+
+striper rm baz.db || true
+
+time sqlite <<EOF
+create table if not exists foo (a INT);
+insert into foo (a) values (RANDOM());
+drop table foo;
+EOF
+
+striper stat baz.db
+striper rm baz.db
+
+time sqlite <<EOF
+CREATE TABLE IF NOT EXISTS rand(text BLOB NOT NULL);
+$(repeat 10 'INSERT INTO rand (text) VALUES (RANDOMBLOB(4096));')
+SELECT LENGTH(text) FROM rand;
+DROP TABLE rand;
+EOF
+
+time sqlite <<EOF
+BEGIN TRANSACTION;
+CREATE TABLE IF NOT EXISTS rand(text BLOB NOT NULL);
+$(repeat 100 'INSERT INTO rand (text) VALUES (RANDOMBLOB(4096));')
+COMMIT;
+SELECT LENGTH(text) FROM rand;
+DROP TABLE rand;
+EOF
+
+# Connection death drops the lock:
+
+striper rm baz.db
+date
+sqlite b <<EOF
+CREATE TABLE foo (a BLOB);
+INSERT INTO foo VALUES ("start");
+WITH RECURSIVE c(x) AS
+ (
+ VALUES(1)
+ UNION ALL
+ SELECT x+1
+ FROM c
+ )
+INSERT INTO foo (a)
+ SELECT RANDOMBLOB(1<<20)
+ FROM c
+ LIMIT (1<<20);
+EOF
+
+# Let it chew on that INSERT for a while so it writes data; it will not
+# finish, as it is trying to write 2^40 bytes...
+sleep 10
+echo done
+
+jobs -l
+kill -KILL -- $(jobs -p)
+date
+wait
+date
+
+n=$(sqlite <<<"SELECT COUNT(*) FROM foo;")
+[ "$n" -eq 1 ]
+
+# Connection "hang" loses the lock and cannot reacquire it:
+
+striper rm baz.db
+date
+sqlite b <<EOF
+CREATE TABLE foo (a BLOB);
+INSERT INTO foo VALUES ("start");
+WITH RECURSIVE c(x) AS
+ (
+ VALUES(1)
+ UNION ALL
+ SELECT x+1
+ FROM c
+ )
+INSERT INTO foo (a)
+ SELECT RANDOMBLOB(1<<20)
+ FROM c
+ LIMIT (1<<20);
+EOF
+
+# Same thing, let it chew on the INSERT for a while...
+sleep 20
+jobs -l
+kill -STOP -- $(jobs -p)
+# cephsqlite_lock_renewal_timeout is 30s
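+# while the client is stopped it cannot renew the lock, so sleeping past the
+# renewal timeout guarantees the lock has lapsed before SIGCONT is delivered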
+sleep 45
+date
+kill -CONT -- $(jobs -p)
+sleep 10
+date
+# it should exit with an error as it lost the lock
+wait
+date
+
+n=$(sqlite <<<"SELECT COUNT(*) FROM foo;")
+[ "$n" -eq 1 ]
diff --git a/qa/workunits/rados/test_librados_build.sh b/qa/workunits/rados/test_librados_build.sh
new file mode 100755
index 000000000..14e332515
--- /dev/null
+++ b/qa/workunits/rados/test_librados_build.sh
@@ -0,0 +1,87 @@
+#!/bin/bash -ex
+#
+# Compile and run a librados application outside of the ceph build system, so
+# that we can be sure librados.h[pp] is still usable and hasn't accidentally
+# started depending on internal headers.
+#
+# The script assumes all dependencies - e.g. curl, make, gcc, librados headers,
+# libradosstriper headers, boost headers, etc. - are already installed.
+#
+
+source $(dirname $0)/../ceph-helpers-root.sh
+
+trap cleanup EXIT
+
+SOURCES="hello_radosstriper.cc
+hello_world_c.c
+hello_world.cc
+Makefile
+"
+BINARIES_TO_RUN="hello_world_c
+hello_world_cpp
+"
+BINARIES="${BINARIES_TO_RUN}hello_radosstriper_cpp
+"
+# parse output like "octopus (dev)"
+case $(librados-config --release | grep -Po ' \(\K[^\)]+') in
+ dev)
+ BRANCH=main;;
+ rc|stable)
+ BRANCH=$(librados-config --release | cut -d' ' -f1);;
+ *)
+ echo "unknown release '$(librados-config --release)'" >&2
+    exit 1;;
+esac
+DL_PREFIX="http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=${BRANCH};f=examples/librados/"
+#DL_PREFIX="https://raw.githubusercontent.com/ceph/ceph/master/examples/librados/"
+DESTDIR=$(pwd)
+
+function cleanup () {
+ for f in $BINARIES$SOURCES ; do
+ rm -f "${DESTDIR}/$f"
+ done
+}
+
+function get_sources () {
+ for s in $SOURCES ; do
+ curl --progress-bar --output $s -L ${DL_PREFIX}$s
+ done
+}
+
+function check_sources () {
+ for s in $SOURCES ; do
+ test -f $s
+ done
+}
+
+function check_binaries () {
+ for b in $BINARIES ; do
+ file $b
+ test -f $b
+ done
+}
+
+function run_binaries () {
+ for b in $BINARIES_TO_RUN ; do
+ ./$b -c /etc/ceph/ceph.conf
+ done
+}
+
+pushd $DESTDIR
+case $(distro_id) in
+ centos|fedora|rhel|opensuse*|suse|sles)
+ install gcc-c++ make libradospp-devel librados-devel;;
+ ubuntu)
+ install gcc-11 g++-11 make libradospp-dev librados-dev
+ export CXX_FLAGS="-std=c++20";;
+ debian|devuan|softiron)
+ install g++ make libradospp-dev librados-dev;;
+ *)
+        echo "$(distro_id) is unknown; dependencies will have to be installed manually."
+esac
+get_sources
+check_sources
+make all-system
+check_binaries
+run_binaries
+popd
diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh
new file mode 100755
index 000000000..4082870bc
--- /dev/null
+++ b/qa/workunits/rados/test_pool_access.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+set -ex
+
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+cleanup() {
+ (ceph auth del client.mon_read || true) >/dev/null 2>&1
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
+
+ rm -f $KEYRING
+}
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+create_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.create_pool("${POOL}")
+EOF
+}
+
+delete_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.delete_pool("${POOL}")
+EOF
+}
+
+create_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.create_snap("${SNAP}")
+EOF
+}
+
+remove_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.remove_snap("${SNAP}")
+EOF
+}
+
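+# Pool create/delete and pool snapshot operations go through the monitors, so
+# they are expected to succeed only with write-capable mon caps ('allow *')
+# and to fail with read-only caps ('allow r').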
+test_pool_op()
+{
+ ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+
+ expect_false create_pool_op mon_read pool1
+ create_pool_op mon_write pool1
+
+ expect_false create_pool_snap_op mon_read pool1 snap1
+ create_pool_snap_op mon_write pool1 snap1
+
+ expect_false remove_pool_snap_op mon_read pool1 snap1
+ remove_pool_snap_op mon_write pool1 snap1
+
+ expect_false delete_pool_op mon_read pool1
+ delete_pool_op mon_write pool1
+}
+
+key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
+rados --id poolaccess1 --key $key -p rbd ls
+
+key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
+expect_false rados --id poolaccess2 --key $key -p rbd ls
+
+key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
+expect_false rados --id poolaccess3 --key $key -p rbd ls
+
+test_pool_op
+
+echo OK
diff --git a/qa/workunits/rados/test_pool_quota.sh b/qa/workunits/rados/test_pool_quota.sh
new file mode 100755
index 000000000..0eacefc64
--- /dev/null
+++ b/qa/workunits/rados/test_pool_quota.sh
@@ -0,0 +1,68 @@
+#!/bin/sh -ex
+
+p=`uuidgen`
+
+# objects
+ceph osd pool create $p 12
+ceph osd pool set-quota $p max_objects 10
+ceph osd pool application enable $p rados
+
+for f in `seq 1 10` ; do
+ rados -p $p put obj$f /etc/passwd
+done
+
+sleep 30
+
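+# the pool is now at its object quota, so this put should block; raising the
+# quota below is what lets it complete, hence "wait" should report success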
+rados -p $p put onemore /etc/passwd &
+pid=$!
+
+ceph osd pool set-quota $p max_objects 100
+wait $pid
+[ $? -ne 0 ] && exit 1 || true
+
+rados -p $p put twomore /etc/passwd
+
+# bytes
+ceph osd pool set-quota $p max_bytes 100
+sleep 30
+
+rados -p $p put two /etc/passwd &
+pid=$!
+
+ceph osd pool set-quota $p max_bytes 0
+ceph osd pool set-quota $p max_objects 0
+wait $pid
+[ $? -ne 0 ] && exit 1 || true
+
+rados -p $p put three /etc/passwd
+
+
+# one pool being full does not block a different pool
+
+pp=`uuidgen`
+
+ceph osd pool create $pp 12
+ceph osd pool application enable $pp rados
+
+# set objects quota
+ceph osd pool set-quota $pp max_objects 10
+sleep 30
+
+for f in `seq 1 10` ; do
+ rados -p $pp put obj$f /etc/passwd
+done
+
+sleep 30
+
+rados -p $p put threemore /etc/passwd
+
+ceph osd pool set-quota $p max_bytes 0
+ceph osd pool set-quota $p max_objects 0
+
+sleep 30
+# done
+ceph osd pool delete $p $p --yes-i-really-really-mean-it
+ceph osd pool delete $pp $pp --yes-i-really-really-mean-it
+
+echo OK
+
diff --git a/qa/workunits/rados/test_python.sh b/qa/workunits/rados/test_python.sh
new file mode 100755
index 000000000..cf4597a41
--- /dev/null
+++ b/qa/workunits/rados/test_python.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -ex
+
+ceph osd pool create rbd
+${PYTHON:-python3} -m pytest -v $(dirname $0)/../../../src/test/pybind/test_rados.py "$@"
+exit 0
diff --git a/qa/workunits/rados/test_rados_timeouts.sh b/qa/workunits/rados/test_rados_timeouts.sh
new file mode 100755
index 000000000..327c7ab32
--- /dev/null
+++ b/qa/workunits/rados/test_rados_timeouts.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+set -x
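+
+# Each helper runs a command with a 1-second op timeout while injecting a
+# very large delay into the given message type; the command is therefore
+# expected to fail with a timeout rather than succeed.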
+
+delay_mon() {
+ MSGTYPE=$1
+ shift
+ $@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
+ if [ $? -eq 0 ]; then
+ exit 1
+ fi
+}
+
+delay_osd() {
+ MSGTYPE=$1
+ shift
+ $@ --rados-osd-op-timeout 1 --ms-inject-delay-type osd --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
+ if [ $? -eq 0 ]; then
+ exit 2
+ fi
+}
+
+# pool ops
+delay_mon omap rados lspools
+delay_mon poolopreply ceph osd pool create test 8
+delay_mon poolopreply rados mksnap -p test snap
+delay_mon poolopreply ceph osd pool rm test test --yes-i-really-really-mean-it
+
+# other mon ops
+delay_mon getpoolstats rados df
+delay_mon mon_command ceph df
+delay_mon omap ceph osd dump
+delay_mon omap ceph -s
+
+# osd ops
+delay_osd osd_op_reply rados -p data put ls /bin/ls
+delay_osd osd_op_reply rados -p data get ls - >/dev/null
+delay_osd osd_op_reply rados -p data ls
+delay_osd command_reply ceph tell osd.0 bench 1 1
+
+# rbd commands, using more kinds of osd ops
+rbd create -s 1 test
+delay_osd osd_op_reply rbd watch test
+delay_osd osd_op_reply rbd info test
+delay_osd osd_op_reply rbd snap create test@snap
+delay_osd osd_op_reply rbd import /bin/ls ls
+rbd rm test
+
+echo OK
diff --git a/qa/workunits/rados/test_rados_tool.sh b/qa/workunits/rados/test_rados_tool.sh
new file mode 100755
index 000000000..9d025eee8
--- /dev/null
+++ b/qa/workunits/rados/test_rados_tool.sh
@@ -0,0 +1,924 @@
+#!/usr/bin/env bash
+
+set -x
+
+die() {
+ echo "$@"
+ exit 1
+}
+
+usage() {
+ cat <<EOF
+test_rados_tool.sh: tests rados_tool
+-c: RADOS configuration file to use [optional]
+-k: keep temp files
+-h: this help message
+-p: set temporary pool to use [optional]
+EOF
+}
+
+do_run() {
+ if [ "$1" == "--tee" ]; then
+ shift
+ tee_out="$1"
+ shift
+ "$@" | tee $tee_out
+ else
+ "$@"
+ fi
+}
+
+run_expect_fail() {
+ echo "RUN_EXPECT_FAIL: " "$@"
+ do_run "$@"
+ [ $? -eq 0 ] && die "expected failure, but got success! cmd: $@"
+}
+
+run_expect_succ() {
+ echo "RUN_EXPECT_SUCC: " "$@"
+ do_run "$@"
+ [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
+}
+
+run_expect_nosignal() {
+ echo "RUN_EXPECT_NOSIGNAL: " "$@"
+ do_run "$@"
+ [ $? -ge 128 ] && die "expected success or fail, but got signal! cmd: $@"
+}
+
+run() {
+ echo "RUN: " $@
+ do_run "$@"
+}
+
+if [ -n "$CEPH_BIN" ] ; then
+ # CMake env
+ RADOS_TOOL="$CEPH_BIN/rados"
+ CEPH_TOOL="$CEPH_BIN/ceph"
+else
+ # executables should be installed by the QA env
+ RADOS_TOOL=$(which rados)
+ CEPH_TOOL=$(which ceph)
+fi
+
+KEEP_TEMP_FILES=0
+POOL=trs_pool
+POOL_CP_TARGET=trs_pool.2
+POOL_EC=trs_pool_ec
+
+[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
+[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
+
+while getopts "c:hkp:" flag; do
+ case $flag in
+ c) RADOS_TOOL="$RADOS_TOOL -c $OPTARG";;
+ k) KEEP_TEMP_FILES=1;;
+ h) usage; exit 0;;
+ p) POOL=$OPTARG;;
+ *) echo; usage; exit 1;;
+ esac
+done
+
+TDIR=`mktemp -d -t test_rados_tool.XXXXXXXXXX` || die "mktemp failed"
+[ $KEEP_TEMP_FILES -eq 0 ] && trap "rm -rf ${TDIR}; exit" INT TERM EXIT
+
+# ensure rados doesn't segfault without --pool
+run_expect_nosignal "$RADOS_TOOL" --snap "asdf" ls
+run_expect_nosignal "$RADOS_TOOL" --snapid "0" ls
+run_expect_nosignal "$RADOS_TOOL" --object-locator "asdf" ls
+run_expect_nosignal "$RADOS_TOOL" --namespace "asdf" ls
+
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+run_expect_succ "$CEPH_TOOL" osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_EC" 100 100 erasure myprofile
+
+
+# expb happens to be the empty export for legacy reasons
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expb"
+
+# expa has objects foo, foo2 and bar
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo2 /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put bar /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expa"
+
+# expc has foo and foo2 with some attributes and omaps set
+run_expect_succ "$RADOS_TOOL" -p "$POOL" rm bar
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "toothbrush"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothpaste" "crest"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval foo "rados.floss" "myfloss"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo2 "rados.toothbrush" "green"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader foo2 "foo2.header"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expc"
+
+# make sure that --create works
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
+
+# make sure that lack of --create fails
+run_expect_succ "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/expa"
+
+run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
+
+# inaccessible import src should fail
+run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/dir_nonexistent"
+
+# export an empty pool to test purge
+run_expect_succ "$RADOS_TOOL" purge "$POOL" --yes-i-really-really-mean-it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/empty"
+cmp -s "$TDIR/expb" "$TDIR/empty" \
+ || die "failed to export the same stuff we imported!"
+rm -f "$TDIR/empty"
+
+# import some stuff with extended attributes on it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ ${VAL} = "toothbrush" ] || die "Invalid attribute after import"
+
+# the second time, the xattrs should match, so there should be nothing to do.
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
+
+# Now try with --no-overwrite option after changing an attribute
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "dentist"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import --no-overwrite "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "dentist" ] || die "Invalid attribute after second import"
+
+# now force it to copy everything
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
+
+# test copy pool
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run "$CEPH_TOOL" osd pool rm "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_CP_TARGET" 8
+
+# create src files
+mkdir -p "$TDIR/dir_cp_src"
+for i in `seq 1 5`; do
+ fname="$TDIR/dir_cp_src/f.$i"
+ objname="f.$i"
+ dd if=/dev/urandom of="$fname" bs=$((1024*1024)) count=$i
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" put $objname "$fname"
+
+# a few random attrs
+ for j in `seq 1 4`; do
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr $objname attr.$j "$rand_str"
+ run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL" getxattr $objname attr.$j
+ done
+
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader $objname "$rand_str"
+ run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL" getomapheader $objname
+
+# a few random omap keys
+ for j in `seq 1 4`; do
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval $objname key.$j "$rand_str"
+ done
+ run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL" listomapvals $objname
+done
+
+run_expect_succ "$RADOS_TOOL" cppool "$POOL" "$POOL_CP_TARGET"
+
+mkdir -p "$TDIR/dir_cp_dst"
+for i in `seq 1 5`; do
+ fname="$TDIR/dir_cp_dst/f.$i"
+ objname="f.$i"
+ run_expect_succ "$RADOS_TOOL" -p "$POOL_CP_TARGET" get $objname "$fname"
+
+# a few random attrs
+ for j in `seq 1 4`; do
+ run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getxattr $objname attr.$j
+ done
+
+ run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getomapheader $objname
+ run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL_CP_TARGET" listomapvals $objname
+done
+
+diff -q -r "$TDIR/dir_cp_src" "$TDIR/dir_cp_dst" \
+ || die "copy pool validation failed!"
+
+for opt in \
+ block-size \
+ concurrent-ios \
+ min-object-size \
+ max-object-size \
+ min-op-len \
+ max-op-len \
+ max-ops \
+ max-backlog \
+ target-throughput \
+ read-percent \
+ num-objects \
+ run-length \
+ ; do
+ run_expect_succ "$RADOS_TOOL" --$opt 4 df
+ run_expect_fail "$RADOS_TOOL" --$opt 4k df
+done
+
+run_expect_succ "$RADOS_TOOL" lock list f.1 --lock-duration 4 --pool "$POOL"
+echo # previous command doesn't output an end of line: issue #9735
+run_expect_fail "$RADOS_TOOL" lock list f.1 --lock-duration 4k --pool "$POOL"
+
+run_expect_succ "$RADOS_TOOL" mksnap snap1 --pool "$POOL"
+snapid=$("$RADOS_TOOL" lssnap --pool "$POOL" | grep snap1 | cut -f1)
+[ $? -ne 0 ] && die "expected success, but got failure! cmd: \"$RADOS_TOOL\" lssnap --pool \"$POOL\" | grep snap1 | cut -f1"
+run_expect_succ "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"
+run_expect_fail "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"k
+
+run_expect_succ "$RADOS_TOOL" truncate f.1 0 --pool "$POOL"
+run_expect_fail "$RADOS_TOOL" truncate f.1 0k --pool "$POOL"
+
+run "$CEPH_TOOL" osd pool rm delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create delete_me_mkpool_test 1
+
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1k write
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write --format json --output "$TDIR/bench.json"
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1 write --output "$TDIR/bench.json"
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --format json --no-cleanup
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand --format json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand -f json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq --format json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq -f json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap --write-object
+
+for i in $("$RADOS_TOOL" --pool "$POOL" ls | grep "benchmark_data"); do
+ "$RADOS_TOOL" --pool "$POOL" truncate $i 0
+done
+
+run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 rand
+run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 seq
+
+set -e
+
+OBJ=test_rados_obj
+
+expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+cleanup() {
+ $RADOS_TOOL -p $POOL rm $OBJ > /dev/null 2>&1 || true
+ $RADOS_TOOL -p $POOL_EC rm $OBJ > /dev/null 2>&1 || true
+}
+
+test_omap() {
+ cleanup
+ for i in $(seq 1 1 10)
+ do
+ if [ $(($i % 2)) -eq 0 ]; then
+ $RADOS_TOOL -p $POOL setomapval $OBJ $i $i
+ else
+ echo -n "$i" | $RADOS_TOOL -p $POOL setomapval $OBJ $i
+ fi
+ $RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$"
+ done
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10
+ for i in $(seq 1 1 5)
+ do
+ $RADOS_TOOL -p $POOL rmomapkey $OBJ $i
+ done
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5
+ $RADOS_TOOL -p $POOL clearomap $OBJ
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | wc -l | grep 0
+ cleanup
+
+ for i in $(seq 1 1 10)
+ do
+ dd if=/dev/urandom bs=128 count=1 > $TDIR/omap_key
+ if [ $(($i % 2)) -eq 0 ]; then
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i
+ else
+ echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ
+ fi
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$"
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0
+ done
+ cleanup
+}
+
+test_xattr() {
+ cleanup
+ $RADOS_TOOL -p $POOL put $OBJ /etc/passwd
+ V1=`mktemp fooattrXXXXXXX`
+ V2=`mktemp fooattrXXXXXXX`
+ echo -n fooval > $V1
+ expect_false $RADOS_TOOL -p $POOL setxattr $OBJ 2>/dev/null
+ expect_false $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval extraarg 2>/dev/null
+ $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval
+ $RADOS_TOOL -p $POOL getxattr $OBJ foo > $V2
+ cmp $V1 $V2
+ cat $V1 | $RADOS_TOOL -p $POOL setxattr $OBJ bar
+ $RADOS_TOOL -p $POOL getxattr $OBJ bar > $V2
+ cmp $V1 $V2
+ $RADOS_TOOL -p $POOL listxattr $OBJ > $V1
+ grep -q foo $V1
+ grep -q bar $V1
+ [ `cat $V1 | wc -l` -eq 2 ]
+ rm $V1 $V2
+ cleanup
+}
+test_rmobj() {
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ $CEPH_TOOL osd pool set-quota $p max_objects 1
+ V1=`mktemp fooattrXXXXXXX`
+ $RADOS_TOOL put $OBJ $V1 -p $p
+ while ! $CEPH_TOOL osd dump | grep 'full_quota max_objects'
+ do
+ sleep 2
+ done
+ $RADOS_TOOL -p $p rm $OBJ --force-full
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+ rm $V1
+}
+
+test_ls() {
+ echo "Testing rados ls command"
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ NS=10
+ OBJS=20
+ # Include default namespace (0) in the total
+ TOTAL=$(expr $OBJS \* $(expr $NS + 1))
+
+ for nsnum in `seq 0 $NS`
+ do
+ for onum in `seq 1 $OBJS`
+ do
+ if [ "$nsnum" = "0" ];
+ then
+ "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
+ else
+ "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
+ fi
+ done
+ done
+ CHECK=$("$RADOS_TOOL" -p $p ls 2> /dev/null | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ];
+ then
+ die "Created $OBJS objects in default namespace but saw $CHECK"
+ fi
+ TESTNS=NS${NS}
+ CHECK=$("$RADOS_TOOL" -p $p -N $TESTNS ls 2> /dev/null | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ];
+ then
+ die "Created $OBJS objects in $TESTNS namespace but saw $CHECK"
+ fi
+ CHECK=$("$RADOS_TOOL" -p $p --all ls 2> /dev/null | wc -l)
+ if [ "$TOTAL" -ne "$CHECK" ];
+ then
+ die "Created $TOTAL objects but saw $CHECK"
+ fi
+
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+}
+
+test_cleanup() {
+ echo "Testing rados cleanup command"
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ NS=5
+ OBJS=4
+ # Include default namespace (0) in the total
+ TOTAL=$(expr $OBJS \* $(expr $NS + 1))
+
+ for nsnum in `seq 0 $NS`
+ do
+ for onum in `seq 1 $OBJS`
+ do
+ if [ "$nsnum" = "0" ];
+ then
+ "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
+ else
+ "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
+ fi
+ done
+ done
+
+ $RADOS_TOOL -p $p --all ls > $TDIR/before.ls.out 2> /dev/null
+
+ $RADOS_TOOL -p $p bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS1 bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS2 bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS3 bench 3 write --no-cleanup 2> /dev/null
+ # Leave dangling objects without a benchmark_last_metadata in NS4
+ expect_false timeout 3 $RADOS_TOOL -p $p -N NS4 bench 30 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS5 bench 3 write --no-cleanup 2> /dev/null
+
+ $RADOS_TOOL -p $p -N NS3 cleanup 2> /dev/null
+ #echo "Check NS3 after specific cleanup"
+ CHECK=$($RADOS_TOOL -p $p -N NS3 ls | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ] ;
+ then
+ die "Expected $OBJS objects in NS3 but saw $CHECK"
+ fi
+
+ #echo "Try to cleanup all"
+ $RADOS_TOOL -p $p --all cleanup
+ #echo "Check all namespaces"
+ $RADOS_TOOL -p $p --all ls > $TDIR/after.ls.out 2> /dev/null
+ CHECK=$(cat $TDIR/after.ls.out | wc -l)
+ if [ "$TOTAL" -ne "$CHECK" ];
+ then
+ die "Expected $TOTAL objects but saw $CHECK"
+ fi
+ if ! diff $TDIR/before.ls.out $TDIR/after.ls.out
+ then
+ die "Different objects found after cleanup"
+ fi
+
+ set +e
+ run_expect_fail $RADOS_TOOL -p $p cleanup --prefix illegal_prefix
+ run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost
+ set -e
+
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+}
+
+function test_append()
+{
+ cleanup
+
+ # create object
+ touch ./rados_append_null
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_null
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_0_out
+ cmp ./rados_append_null ./rados_append_0_out
+
+ # append 4k, total size 4k
+ dd if=/dev/zero of=./rados_append_4k bs=4k count=1
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
+ cmp ./rados_append_4k ./rados_append_4k_out
+
+ # append 4k, total size 8k
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
+ read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
+ if [ 8192 -ne $read_size ];
+ then
+        die "Append failed: expected size 8192, read $read_size"
+ fi
+
+ # append 10M, total size 10493952
+ dd if=/dev/zero of=./rados_append_10m bs=10M count=1
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_10m
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_10m_out
+ read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
+ if [ 10493952 -ne $read_size ];
+ then
+        die "Append failed: expected size 10493952, read $read_size"
+ fi
+
+ # cleanup
+ cleanup
+
+ # create object
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_null
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_0_out
+ cmp rados_append_null rados_append_0_out
+
+ # append 4k, total size 4k
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
+ cmp rados_append_4k rados_append_4k_out
+
+ # append 4k, total size 8k
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
+ read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
+ if [ 8192 -ne $read_size ];
+ then
+        die "Append failed: expected size 8192, read $read_size"
+ fi
+
+ # append 10M, total size 10493952
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_10m
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_10m_out
+ read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
+ if [ 10493952 -ne $read_size ];
+ then
+        die "Append failed: expected size 10493952, read $read_size"
+ fi
+
+ cleanup
+ rm -rf ./rados_append_null ./rados_append_0_out
+ rm -rf ./rados_append_4k ./rados_append_4k_out ./rados_append_10m ./rados_append_10m_out
+}
+
+function test_put()
+{
+ # rados put test:
+ cleanup
+
+ # create file in local fs
+ dd if=/dev/urandom of=rados_object_10k bs=1K count=10
+
+ # test put command
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_10k_out
+ cmp ./rados_object_10k ./rados_object_10k_out
+ cleanup
+
+ # test put command with offset 0
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 0
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_0_out
+ cmp ./rados_object_10k ./rados_object_offset_0_out
+ cleanup
+
+ # test put command with offset 1000
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 1000
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_1000_out
+ cmp ./rados_object_10k ./rados_object_offset_1000_out 0 1000
+ cleanup
+
+ rm -rf ./rados_object_10k ./rados_object_10k_out ./rados_object_offset_0_out ./rados_object_offset_1000_out
+}
+
+function test_stat()
+{
+ bluestore=$("$CEPH_TOOL" osd metadata | grep '"osd_objectstore": "bluestore"' | cut -f1)
+ # create file in local fs
+ dd if=/dev/urandom of=rados_object_128k bs=64K count=2
+
+ # rados df test (replicated_pool):
+ $RADOS_TOOL purge $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool create $POOL 8
+ $CEPH_TOOL osd pool set $POOL size 3
+
+ # put object with 1 MB gap in front
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ [[ -z $IN ]] && sleep 1 && continue
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies replication factor 3
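+        # a data row from "rados df" looks roughly like this (illustrative;
+        # exact spacing and values vary):
+        #   trs_pool  1.1 MiB  1  0  3  0  0  0  0  0 B  1  128 KiB  0 B  0 B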
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite data at 1MB offset
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # write data at 64K offset
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=65536
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=768
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 384 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 384 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite object totally
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "4" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "4" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ cleanup
+
+ # after cleanup?
+ MATCH_CNT=0
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies replication factor 3
+ if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "5" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "5" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ ############ rados df test (EC pool): ##############
+ $RADOS_TOOL purge $POOL_EC --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $POOL_EC $POOL_EC --yes-i-really-really-mean-it
+ $CEPH_TOOL osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
+ $CEPH_TOOL osd pool create $POOL_EC 8 8 erasure
+
+ # put object
+ $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=192
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ [[ -z $IN ]] && sleep 1 && continue
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies a 2+1 erasure-coded layout (3 shards, hence COPIES == 3)
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite object
+ $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=192
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies a 2+1 erasure-coded layout (3 shards, hence COPIES == 3)
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ cleanup
+
+ # after cleanup?
+ MATCH_CNT=0
+ for i in {1..60}
+ do
+        IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+        # verification is a bit tricky because pool stats are reported in an
+        # eventually consistent fashion
+        # VALS[1]  - STORED
+        # VALS[2]  - STORED units
+        # VALS[3]  - OBJECTS
+        # VALS[5]  - COPIES
+        # VALS[12] - WR_OPS
+        # VALS[13] - WR
+        # VALS[14] - WR units
+        # implies a 2+1 erasure-coded layout (3 shards, hence COPIES == 3)
+ if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ rm -rf ./rados_object_128k
+}
+
+test_xattr
+test_omap
+test_rmobj
+test_ls
+test_cleanup
+test_append
+test_put
+test_stat
+
+# clean up environment, delete pool
+$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+$CEPH_TOOL osd pool delete $POOL_EC $POOL_EC --yes-i-really-really-mean-it
+$CEPH_TOOL osd pool delete $POOL_CP_TARGET $POOL_CP_TARGET --yes-i-really-really-mean-it
+
+echo "SUCCESS!"
+exit 0
diff --git a/qa/workunits/rados/version_number_sanity.sh b/qa/workunits/rados/version_number_sanity.sh
new file mode 100755
index 000000000..e7eb9be64
--- /dev/null
+++ b/qa/workunits/rados/version_number_sanity.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -ex
+#
+# test that ceph RPM/DEB package version matches "ceph --version"
+# (for a loose definition of "matches")
+#
+source /etc/os-release
+case $ID in
+debian|ubuntu)
+ RPMDEB='DEB'
+ dpkg-query --show ceph-common
+ PKG_NAME_AND_VERSION=$(dpkg-query --show ceph-common)
+ ;;
+centos|fedora|rhel|opensuse*|suse|sles)
+ RPMDEB='RPM'
+ rpm -q ceph
+ PKG_NAME_AND_VERSION=$(rpm -q ceph)
+ ;;
+*)
+ echo "Unsupported distro ->$ID<-! Bailing out."
+ exit 1
+esac
+PKG_CEPH_VERSION=$(perl -e '"'"$PKG_NAME_AND_VERSION"'" =~ m/(\d+(\.\d+)+)/; print "$1\n";')
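+# e.g. (illustrative) a package string like "ceph-common 18.2.2-1" yields "18.2.2"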
+echo "According to $RPMDEB package, the ceph version under test is ->$PKG_CEPH_VERSION<-"
+test -n "$PKG_CEPH_VERSION"
+ceph --version
+BUFFER=$(ceph --version)
+CEPH_CEPH_VERSION=$(perl -e '"'"$BUFFER"'" =~ m/ceph version (\d+(\.\d+)+)/; print "$1\n";')
+echo "According to \"ceph --version\", the ceph version under test is ->$CEPH_CEPH_VERSION<-"
+test -n "$CEPH_CEPH_VERSION"
+test "$PKG_CEPH_VERSION" = "$CEPH_CEPH_VERSION"
diff --git a/qa/workunits/rbd/cli_generic.sh b/qa/workunits/rbd/cli_generic.sh
new file mode 100755
index 000000000..57279d26d
--- /dev/null
+++ b/qa/workunits/rbd/cli_generic.sh
@@ -0,0 +1,1715 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+export RBD_FORCE_ALLOW_V1=1
+
+# make sure the rbd pool is EMPTY... this is a test script!!
+rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
+
+IMGS="testimg1 testimg2 testimg3 testimg4 testimg5 testimg6 testimg-diff1 testimg-diff2 testimg-diff3 foo foo2 bar bar2 test1 test2 test3 test4 clone2"
+
+expect_fail() {
+ "$@" && return 1 || return 0
+}
+
+tiered=0
+if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
+ tiered=1
+fi
+
+remove_images() {
+ for img in $IMGS
+ do
+ (rbd snap purge $img || true) >/dev/null 2>&1
+ (rbd rm $img || true) >/dev/null 2>&1
+ done
+}
+
+test_others() {
+ echo "testing import, export, resize, and snapshots..."
+ TMP_FILES="/tmp/img1 /tmp/img1.new /tmp/img2 /tmp/img2.new /tmp/img3 /tmp/img3.new /tmp/img-diff1.new /tmp/img-diff2.new /tmp/img-diff3.new /tmp/img1.snap1 /tmp/img1.snap1 /tmp/img-diff1.snap1"
+
+ remove_images
+ rm -f $TMP_FILES
+
+ # create an image
+ dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
+ dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
+ dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
+ dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
+ dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
+
+ # import, snapshot
+ rbd import $RBD_CREATE_ARGS /tmp/img1 testimg1
+ rbd resize testimg1 --size=256 --allow-shrink
+ rbd export testimg1 /tmp/img2
+ rbd snap create testimg1 --snap=snap1
+ rbd resize testimg1 --size=128 && exit 1 || true # shrink should fail
+ rbd resize testimg1 --size=128 --allow-shrink
+ rbd export testimg1 /tmp/img3
+
+ # info
+ rbd info testimg1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
+
+ # export-diff
+ rm -rf /tmp/diff-testimg1-1 /tmp/diff-testimg1-2
+ rbd export-diff testimg1 --snap=snap1 /tmp/diff-testimg1-1
+ rbd export-diff testimg1 --from-snap=snap1 /tmp/diff-testimg1-2
+
+ # import-diff
+ rbd create $RBD_CREATE_ARGS --size=1 testimg-diff1
+ rbd import-diff --sparse-size 8K /tmp/diff-testimg1-1 testimg-diff1
+ rbd import-diff --sparse-size 8K /tmp/diff-testimg1-2 testimg-diff1
+
+ # info
+ rbd info testimg1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
+ rbd info testimg-diff1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg-diff1 | grep 'size 256 MiB'
+
+ # make copies
+ rbd copy testimg1 --snap=snap1 testimg2
+ rbd copy testimg1 testimg3
+ rbd copy testimg-diff1 --sparse-size 768K --snap=snap1 testimg-diff2
+ rbd copy testimg-diff1 --sparse-size 768K testimg-diff3
+
+ # verify the result
+ rbd info testimg2 | grep 'size 256 MiB'
+ rbd info testimg3 | grep 'size 128 MiB'
+ rbd info testimg-diff2 | grep 'size 256 MiB'
+ rbd info testimg-diff3 | grep 'size 128 MiB'
+
+ # deep copies
+ rbd deep copy testimg1 testimg4
+ rbd deep copy testimg1 --snap=snap1 testimg5
+ rbd info testimg4 | grep 'size 128 MiB'
+ rbd info testimg5 | grep 'size 256 MiB'
+ rbd snap ls testimg4 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg4 | grep '.*snap1.*'
+
+ rbd export testimg1 /tmp/img1.new
+ rbd export testimg2 /tmp/img2.new
+ rbd export testimg3 /tmp/img3.new
+ rbd export testimg-diff1 /tmp/img-diff1.new
+ rbd export testimg-diff2 /tmp/img-diff2.new
+ rbd export testimg-diff3 /tmp/img-diff3.new
+
+ cmp /tmp/img2 /tmp/img2.new
+ cmp /tmp/img3 /tmp/img3.new
+ cmp /tmp/img2 /tmp/img-diff2.new
+ cmp /tmp/img3 /tmp/img-diff3.new
+
+ # rollback
+ rbd snap rollback --snap=snap1 testimg1
+ rbd snap rollback --snap=snap1 testimg-diff1
+ rbd info testimg1 | grep 'size 256 MiB'
+ rbd info testimg-diff1 | grep 'size 256 MiB'
+ rbd export testimg1 /tmp/img1.snap1
+ rbd export testimg-diff1 /tmp/img-diff1.snap1
+ cmp /tmp/img2 /tmp/img1.snap1
+ cmp /tmp/img2 /tmp/img-diff1.snap1
+
+ # test create, copy of zero-length images
+ rbd rm testimg2
+ rbd rm testimg3
+ rbd create testimg2 -s 0
+ rbd cp testimg2 testimg3
+ rbd deep cp testimg2 testimg6
+
+ # remove snapshots
+ rbd snap rm --snap=snap1 testimg1
+ rbd snap rm --snap=snap1 testimg-diff1
+ rbd info --snap=snap1 testimg1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
+ rbd info --snap=snap1 testimg-diff1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
+
+ # sparsify
+ rbd sparsify testimg1
+
+ remove_images
+ rm -f $TMP_FILES
+}
+
+test_rename() {
+ echo "testing rename..."
+ remove_images
+
+ rbd create --image-format 1 -s 1 foo
+ rbd create --image-format 2 -s 1 bar
+ rbd rename foo foo2
+ rbd rename foo2 bar 2>&1 | grep exists
+ rbd rename bar bar2
+ rbd rename bar2 foo2 2>&1 | grep exists
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd create -p rbd2 -s 1 foo
+ rbd rename rbd2/foo rbd2/bar
+ rbd -p rbd2 ls | grep bar
+ rbd rename rbd2/bar foo
+ rbd rename --pool rbd2 foo bar
+ ! rbd rename rbd2/bar --dest-pool rbd foo
+ rbd rename --pool rbd2 bar --dest-pool rbd2 foo
+ rbd -p rbd2 ls | grep foo
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+
+ remove_images
+}
+
+test_ls() {
+ echo "testing ls..."
+ remove_images
+
+ rbd create --image-format 1 -s 1 test1
+ rbd create --image-format 1 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ # look for fields in output of ls -l without worrying about space
+ rbd ls -l | grep 'test1.*1 MiB.*1'
+ rbd ls -l | grep 'test2.*1 MiB.*1'
+
+ rbd rm test1
+ rbd rm test2
+
+ rbd create --image-format 2 -s 1 test1
+ rbd create --image-format 2 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*1 MiB.*2'
+ rbd ls -l | grep 'test2.*1 MiB.*2'
+
+ rbd rm test1
+ rbd rm test2
+
+ rbd create --image-format 2 -s 1 test1
+ rbd create --image-format 1 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*1 MiB.*2'
+ rbd ls -l | grep 'test2.*1 MiB.*1'
+ remove_images
+
+ # test that many images can be shown by ls
+ for i in $(seq -w 00 99); do
+ rbd create image.$i -s 1
+ done
+ rbd ls | wc -l | grep 100
+ rbd ls -l | grep image | wc -l | grep 100
+ for i in $(seq -w 00 99); do
+ rbd rm image.$i
+ done
+
+ for i in $(seq -w 00 99); do
+ rbd create image.$i --image-format 2 -s 1
+ done
+ rbd ls | wc -l | grep 100
+ rbd ls -l | grep image | wc -l | grep 100
+ for i in $(seq -w 00 99); do
+ rbd rm image.$i
+ done
+}
+
+test_remove() {
+ echo "testing remove..."
+ remove_images
+
+ rbd remove "NOT_EXIST" && exit 1 || true # remove should fail
+ rbd create --image-format 1 -s 1 test1
+ rbd rm test1
+ rbd ls | wc -l | grep "^0$"
+
+ rbd create --image-format 2 -s 1 test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # check that remove succeeds even if it's
+ # interrupted partway through. simulate this
+ # by removing some objects manually.
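+    # (for a format 2 image, the backing objects touched below are, roughly,
+    # rbd_id.<name>, which maps the image name to its id, and rbd_header.<id>,
+    # which holds the image metadata)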
+
+ # remove with header missing (old format)
+ rbd create --image-format 1 -s 1 test1
+ rados rm -p rbd test1.rbd
+ rbd rm test1
+ rbd ls | wc -l | grep "^0$"
+
+ if [ $tiered -eq 0 ]; then
+ # remove with header missing
+ rbd create --image-format 2 -s 1 test2
+ HEADER=$(rados -p rbd ls | grep '^rbd_header')
+ rados -p rbd rm $HEADER
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # remove with id missing
+ rbd create --image-format 2 -s 1 test2
+ rados -p rbd rm rbd_id.test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # remove with header and id missing
+ rbd create --image-format 2 -s 1 test2
+ HEADER=$(rados -p rbd ls | grep '^rbd_header')
+ rados -p rbd rm $HEADER
+ rados -p rbd rm rbd_id.test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+ fi
+
+ # remove with rbd_children object missing (and, by extension,
+ # with child not mentioned in rbd_children)
+ rbd create --image-format 2 -s 1 test2
+ rbd snap create test2@snap
+ rbd snap protect test2@snap
+ rbd clone test2@snap clone --rbd-default-clone-format 1
+
+ rados -p rbd rm rbd_children
+ rbd rm clone
+ rbd ls | grep clone | wc -l | grep '^0$'
+
+ rbd snap unprotect test2@snap
+ rbd snap rm test2@snap
+ rbd rm test2
+}
+
+test_locking() {
+ echo "testing locking..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd lock list test1 | wc -l | grep '^0$'
+ rbd lock add test1 id
+ rbd lock list test1 | grep ' 1 '
+ LOCKER=$(rbd lock list test1 | tail -n 1 | awk '{print $1;}')
+ rbd lock remove test1 id $LOCKER
+ rbd lock list test1 | wc -l | grep '^0$'
+
+ rbd lock add test1 id --shared tag
+ rbd lock list test1 | grep ' 1 '
+ rbd lock add test1 id --shared tag
+ rbd lock list test1 | grep ' 2 '
+ rbd lock add test1 id2 --shared tag
+ rbd lock list test1 | grep ' 3 '
+ rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
+ if rbd info test1 | grep -qE "features:.*exclusive"
+ then
+ # new locking functionality requires all locks to be released
+ while [ -n "$(rbd lock list test1)" ]
+ do
+ rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
+ done
+ fi
+ rbd rm test1
+}
+
+test_pool_image_args() {
+ echo "testing pool and image args..."
+ remove_images
+
+ ceph osd pool delete test test --yes-i-really-really-mean-it || true
+ ceph osd pool create test 32
+ rbd pool init test
+ truncate -s 1 /tmp/empty /tmp/empty@snap
+
+ rbd ls | wc -l | grep 0
+ rbd create -s 1 test1
+ rbd ls | grep -q test1
+ rbd import --image test2 /tmp/empty
+ rbd ls | grep -q test2
+ rbd --dest test3 import /tmp/empty
+ rbd ls | grep -q test3
+ rbd import /tmp/empty foo
+ rbd ls | grep -q foo
+
+ # should fail due to "destination snapname specified"
+ rbd import --dest test/empty@snap /tmp/empty && exit 1 || true
+ rbd import /tmp/empty test/empty@snap && exit 1 || true
+ rbd import --image test/empty@snap /tmp/empty && exit 1 || true
+ rbd import /tmp/empty@snap && exit 1 || true
+
+ rbd ls test | wc -l | grep 0
+ rbd import /tmp/empty test/test1
+ rbd ls test | grep -q test1
+ rbd -p test import /tmp/empty test2
+ rbd ls test | grep -q test2
+ rbd --image test3 -p test import /tmp/empty
+ rbd ls test | grep -q test3
+ rbd --image test4 -p test import /tmp/empty
+ rbd ls test | grep -q test4
+ rbd --dest test5 -p test import /tmp/empty
+ rbd ls test | grep -q test5
+ rbd --dest test6 --dest-pool test import /tmp/empty
+ rbd ls test | grep -q test6
+ rbd --image test7 --dest-pool test import /tmp/empty
+ rbd ls test | grep -q test7
+ rbd --image test/test8 import /tmp/empty
+ rbd ls test | grep -q test8
+ rbd --dest test/test9 import /tmp/empty
+ rbd ls test | grep -q test9
+ rbd import --pool test /tmp/empty
+ rbd ls test | grep -q empty
+
+ # copy with no explicit pool goes to pool rbd
+ rbd copy test/test9 test10
+ rbd ls test | grep -qv test10
+ rbd ls | grep -q test10
+ rbd copy test/test9 test/test10
+ rbd ls test | grep -q test10
+ rbd copy --pool test test10 --dest-pool test test11
+ rbd ls test | grep -q test11
+ rbd copy --dest-pool rbd --pool test test11 test12
+ rbd ls | grep test12
+ rbd ls test | grep -qv test12
+
+ rm -f /tmp/empty /tmp/empty@snap
+ ceph osd pool delete test test --yes-i-really-really-mean-it
+
+ for f in foo test1 test10 test12 test2 test3 ; do
+ rbd rm $f
+ done
+}
+
+test_clone() {
+ echo "testing clone..."
+ remove_images
+ rbd create test1 $RBD_CREATE_ARGS -s 1
+ rbd snap create test1@s1
+ rbd snap protect test1@s1
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd clone test1@s1 rbd2/clone
+ rbd -p rbd2 ls | grep clone
+ rbd -p rbd2 ls -l | grep clone | grep test1@s1
+ rbd ls | grep -v clone
+ rbd flatten rbd2/clone
+ rbd snap create rbd2/clone@s1
+ rbd snap protect rbd2/clone@s1
+ rbd clone rbd2/clone@s1 clone2
+ rbd ls | grep clone2
+ rbd ls -l | grep clone2 | grep rbd2/clone@s1
+ rbd -p rbd2 ls | grep -v clone2
+
+ rbd rm clone2
+ rbd snap unprotect rbd2/clone@s1
+ rbd snap rm rbd2/clone@s1
+ rbd rm rbd2/clone
+ rbd snap unprotect test1@s1
+ rbd snap rm test1@s1
+ rbd rm test1
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_trash() {
+ echo "testing trash..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd create $RBD_CREATE_ARGS -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*2.*'
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash mv test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 1
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*'
+ rbd trash ls -l | grep -v 'protected until'
+
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd trash rm $ID
+
+ rbd trash mv test2
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd info --image-id $ID | grep "rbd image 'test2'"
+
+ rbd trash restore $ID
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 1
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash mv test2 --expires-at "3600 sec"
+ rbd trash ls | grep test2
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test2.*USER.*protected until'
+
+ rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
+ rbd trash rm --image-id $ID --force
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@snap1
+ rbd snap protect test1@snap1
+ rbd trash mv test1
+
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*'
+ rbd trash ls -l | grep -v 'protected until'
+
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls --image-id $ID | grep '.*snap1.*'
+
+ rbd snap unprotect --image-id $ID --snap snap1
+ rbd snap rm --image-id $ID --snap snap1
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
+
+ rbd trash restore $ID
+ rbd snap create test1@snap1
+ rbd snap create test1@snap2
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 2
+ rbd snap purge --image-id $ID
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
+
+ rbd rm --rbd_move_to_trash_on_remove=true --rbd_move_to_trash_on_remove_expire_seconds=3600 test1
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*protected until'
+ rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
+ rbd trash rm --image-id $ID --force
+
+ remove_images
+}
+
+test_purge() {
+ echo "testing trash purge..."
+ remove_images
+
+ rbd trash ls | wc -l | grep 0
+ rbd trash purge
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd trash mv testimg1 --expires-at "1 hour"
+ rbd trash mv testimg2 --expires-at "3 hours"
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge --expired-before "now + 2 hours"
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg2
+ rbd trash purge --expired-before "now + 4 hours"
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap # pin testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg1
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd snap create testimg2@snap # pin testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg2
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ # test purging a clone with a chain of parents
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
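+    # resulting chain (child -> parent): testimg2 -> testimg1,
+    # testimg4 -> testimg2, testimg5 -> testimg2, testimg6 -> testimg4;
+    # a trashed parent cannot be purged while any descendant remains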
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash mv testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 2
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash mv testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 5
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg3
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ # test purging a clone with a chain of auto-delete parents
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
+ rbd trash mv testimg3
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 2
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
+ rbd trash mv testimg3
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 5
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg3
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+}
+
+test_deep_copy_clone() {
+ echo "testing deep copy clone..."
+ remove_images
+
+ rbd create testimg1 $RBD_CREATE_ARGS --size 256
+ rbd snap create testimg1 --snap=snap1
+ rbd snap protect testimg1@snap1
+ rbd clone testimg1@snap1 testimg2
+ rbd snap create testimg2@snap2
+ rbd deep copy testimg2 testimg3
+ rbd info testimg3 | grep 'size 256 MiB'
+ rbd info testimg3 | grep 'parent: rbd/testimg1@snap1'
+ rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg3 | grep '.*snap2.*'
+ rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
+ rbd info testimg3 | grep 'features:.*deep-flatten' || rbd snap rm testimg3@snap2
+ rbd flatten testimg2
+ rbd flatten testimg3
+ rbd snap unprotect testimg1@snap1
+ rbd snap purge testimg2
+ rbd snap purge testimg3
+ rbd rm testimg2
+ rbd rm testimg3
+
+ rbd snap protect testimg1@snap1
+ rbd clone testimg1@snap1 testimg2
+ rbd snap create testimg2@snap2
+ rbd deep copy --flatten testimg2 testimg3
+ rbd info testimg3 | grep 'size 256 MiB'
+ rbd info testimg3 | grep -v 'parent:'
+ rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg3 | grep '.*snap2.*'
+ rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
+ rbd flatten testimg2
+ rbd snap unprotect testimg1@snap1
+
+ remove_images
+}
+
+test_clone_v2() {
+ echo "testing clone v2..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@1
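+    # a v1 clone requires the parent snapshot to be protected, so the
+    # first attempt must fail; v2 clones need no protection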
+ rbd clone --rbd-default-clone-format=1 test1@1 test2 && exit 1 || true
+ rbd clone --rbd-default-clone-format=2 test1@1 test2
+ rbd clone --rbd-default-clone-format=2 test1@1 test3
+
+ rbd snap protect test1@1
+ rbd clone --rbd-default-clone-format=1 test1@1 test4
+
+ rbd children test1@1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
+ rbd children --descendants test1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
+
+ rbd remove test4
+ rbd snap unprotect test1@1
+
+ rbd snap remove test1@1
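+    # with v2 clones still attached, the removed snapshot is moved to the
+    # trash snapshot namespace rather than deleted outright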
+ rbd snap list --all test1 | grep -E "trash \(1\) *$"
+
+ rbd snap create test1@2
+ rbd rm test1 2>&1 | grep 'image has snapshots'
+
+ rbd snap rm test1@2
+ rbd rm test1 2>&1 | grep 'linked clones'
+
+ rbd rm test3
+ rbd rm test1 2>&1 | grep 'linked clones'
+
+ rbd flatten test2
+ rbd snap list --all test1 | wc -l | grep '^0$'
+ rbd rm test1
+ rbd rm test2
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@1
+ rbd snap create test1@2
+ rbd clone test1@1 test2 --rbd-default-clone-format 2
+ rbd clone test1@2 test3 --rbd-default-clone-format 2
+ rbd snap rm test1@1
+ rbd snap rm test1@2
+ expect_fail rbd rm test1
+ rbd rm test1 --rbd-move-parent-to-trash-on-remove=true
+ rbd trash ls -a | grep test1
+ rbd rm test2
+ rbd trash ls -a | grep test1
+ rbd rm test3
+ rbd trash ls -a | expect_fail grep test1
+}
+
+test_thick_provision() {
+ echo "testing thick provision..."
+ remove_images
+
+    # Create a small (64M) and a large (4G) thick-provisioned image and
+    # check the actual allocated size of each.
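+    # a thick-provisioned image is fully allocated at creation, so `rbd du`
+    # should report USED equal to PROVISIONED; the polling loops below give
+    # the usage stats time to settle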
+
+    # Small thick-provisioned image test
+ rbd create $RBD_CREATE_ARGS --thick-provision -s 64M test1
+ count=0
+ ret=""
+ while [ $count -lt 10 ]
+ do
+        rbd du | grep test1 | tr -s " " | cut -d " " -f 4-5 | grep '^64 MiB' && ret=$?
+ if [ "$ret" = "0" ]
+ then
+ break;
+ fi
+ count=`expr $count + 1`
+ sleep 2
+ done
+ rbd du
+ if [ "$ret" != "0" ]
+ then
+ exit 1
+ fi
+ rbd rm test1
+ rbd ls | grep test1 | wc -l | grep '^0$'
+
+    # Large thick-provisioned image test
+ rbd create $RBD_CREATE_ARGS --thick-provision -s 4G test1
+ count=0
+ ret=""
+ while [ $count -lt 10 ]
+ do
+        rbd du | grep test1 | tr -s " " | cut -d " " -f 4-5 | grep '^4 GiB' && ret=$?
+ if [ "$ret" = "0" ]
+ then
+ break;
+ fi
+ count=`expr $count + 1`
+ sleep 2
+ done
+ rbd du
+ if [ "$ret" != "0" ]
+ then
+ exit 1
+ fi
+ rbd rm test1
+ rbd ls | grep test1 | wc -l | grep '^0$'
+}
+
+test_namespace() {
+ echo "testing namespace..."
+ remove_images
+
+ rbd namespace ls | wc -l | grep '^0$'
+ rbd namespace create rbd/test1
+ rbd namespace create --pool rbd --namespace test2
+ rbd namespace create --namespace test3
+ expect_fail rbd namespace create rbd/test3
+
+ rbd namespace list | grep 'test' | wc -l | grep '^3$'
+
+ expect_fail rbd namespace remove --pool rbd missing
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image1
+
+    # test1 ns to test2 ns clone
+ rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/test1/image1
+ rbd snap create rbd/test1/image1@1
+ rbd clone --rbd-default-clone-format 2 rbd/test1/image1@1 rbd/test2/image1
+ rbd snap rm rbd/test1/image1@1
+ cmp <(rbd export rbd/test1/image1 -) <(rbd export rbd/test2/image1 -)
+ rbd rm rbd/test2/image1
+
+    # default ns to test2 ns clone
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/image2
+ rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/image2
+ rbd snap create rbd/image2@1
+ rbd clone --rbd-default-clone-format 2 rbd/image2@1 rbd/test2/image2
+ rbd snap rm rbd/image2@1
+ cmp <(rbd export rbd/image2 -) <(rbd export rbd/test2/image2 -)
+ expect_fail rbd rm rbd/image2
+ rbd rm rbd/test2/image2
+ rbd rm rbd/image2
+
+ # v1 clones are supported within the same namespace
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image3
+ rbd snap create rbd/test1/image3@1
+ rbd snap protect rbd/test1/image3@1
+ rbd clone --rbd-default-clone-format 1 rbd/test1/image3@1 rbd/test1/image4
+ rbd rm rbd/test1/image4
+ rbd snap unprotect rbd/test1/image3@1
+ rbd snap rm rbd/test1/image3@1
+ rbd rm rbd/test1/image3
+
+ rbd create $RBD_CREATE_ARGS --size 1G --namespace test1 image2
+ expect_fail rbd namespace remove rbd/test1
+
+ rbd group create rbd/test1/group1
+ rbd group image add rbd/test1/group1 rbd/test1/image1
+ rbd group rm rbd/test1/group1
+
+ rbd trash move rbd/test1/image1
+ ID=`rbd trash --namespace test1 ls | cut -d ' ' -f 1`
+ rbd trash rm rbd/test1/${ID}
+
+ rbd remove rbd/test1/image2
+
+ rbd namespace remove --pool rbd --namespace test1
+ rbd namespace remove --namespace test3
+
+ rbd namespace list | grep 'test' | wc -l | grep '^1$'
+ rbd namespace remove rbd/test2
+}
+
+get_migration_state() {
+ local image=$1
+
+ rbd --format xml status $image |
+ $XMLSTARLET sel -t -v '//status/migration/state'
+}
+
+test_migration() {
+ echo "testing migration..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ # Convert to new format
+ rbd create --image-format 1 -s 128M test1
+ rbd info test1 | grep 'format: 1'
+ rbd migration prepare test1 --image-format 2
+ test "$(get_migration_state test1)" = prepared
+ rbd info test1 | grep 'format: 2'
+ rbd rm test1 && exit 1 || true
+ rbd migration execute test1
+ test "$(get_migration_state test1)" = executed
+ rbd migration commit test1
+ get_migration_state test1 && exit 1 || true
+
+ # Enable layering (and some other features)
+ rbd info test1 | grep 'features: .*layering' && exit 1 || true
+ rbd migration prepare test1 --image-feature \
+ layering,exclusive-lock,object-map,fast-diff,deep-flatten
+ rbd info test1 | grep 'features: .*layering'
+ rbd migration execute test1
+ rbd migration commit test1
+
+ # Migration to other pool
+ rbd migration prepare test1 rbd2/test1
+ test "$(get_migration_state rbd2/test1)" = prepared
+ rbd ls | wc -l | grep '^0$'
+ rbd -p rbd2 ls | grep test1
+ rbd migration execute test1
+ test "$(get_migration_state rbd2/test1)" = executed
+ rbd rm rbd2/test1 && exit 1 || true
+ rbd migration commit test1
+
+ # Migration to other namespace
+ rbd namespace create rbd2/ns1
+ rbd namespace create rbd2/ns2
+ rbd migration prepare rbd2/test1 rbd2/ns1/test1
+ test "$(get_migration_state rbd2/ns1/test1)" = prepared
+ rbd migration execute rbd2/test1
+ test "$(get_migration_state rbd2/ns1/test1)" = executed
+ rbd migration commit rbd2/test1
+ rbd migration prepare rbd2/ns1/test1 rbd2/ns2/test1
+ rbd migration execute rbd2/ns2/test1
+ rbd migration commit rbd2/ns2/test1
+
+ # Enable data pool
+ rbd create -s 128M test1
+ rbd migration prepare test1 --data-pool rbd2
+ rbd info test1 | grep 'data_pool: rbd2'
+ rbd migration execute test1
+ rbd migration commit test1
+
+    # trash operations are rejected while a migration is in progress
+ rbd migration prepare test1
+ expect_fail rbd trash mv test1
+ ID=`rbd trash ls -a | cut -d ' ' -f 1`
+ expect_fail rbd trash rm $ID
+ expect_fail rbd trash restore $ID
+ rbd migration abort test1
+
+ # Migrate parent
+ rbd remove test1
+ dd if=/dev/urandom bs=1M count=1 | rbd --image-format 2 import - test1
+ md5sum=$(rbd export test1 - | md5sum)
+ rbd snap create test1@snap1
+ rbd snap protect test1@snap1
+ rbd snap create test1@snap2
+ rbd clone test1@snap1 clone_v1 --rbd_default_clone_format=1
+ rbd clone test1@snap2 clone_v2 --rbd_default_clone_format=2
+ rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
+ rbd info clone_v2 |grep 'op_features: clone-child'
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ test "$(rbd children test1@snap1)" = "rbd/clone_v1"
+ test "$(rbd children test1@snap2)" = "rbd/clone_v2"
+ rbd migration prepare test1 rbd2/test2
+ rbd info clone_v1 | fgrep 'parent: rbd2/test2@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd2/test2@snap2'
+ rbd info clone_v2 | fgrep 'op_features: clone-child'
+ test "$(rbd children rbd2/test2@snap1)" = "rbd/clone_v1"
+ test "$(rbd children rbd2/test2@snap2)" = "rbd/clone_v2"
+ rbd migration execute test1
+ expect_fail rbd migration commit test1
+ rbd migration commit test1 --force
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ rbd migration prepare rbd2/test2 test1
+ rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
+ rbd info clone_v2 | fgrep 'op_features: clone-child'
+ test "$(rbd children test1@snap1)" = "rbd/clone_v1"
+ test "$(rbd children test1@snap2)" = "rbd/clone_v2"
+ rbd migration execute test1
+ expect_fail rbd migration commit test1
+ rbd migration commit test1 --force
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ rbd remove clone_v1
+ rbd remove clone_v2
+ rbd snap unprotect test1@snap1
+ rbd snap purge test1
+ rbd rm test1
+
+ for format in 1 2; do
+ # Abort migration after successful prepare
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool rbd2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # Abort migration after successful execute
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool rbd2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd migration execute test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+    # Migration is automatically aborted if prepare fails
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool INVALID_DATA_POOL && exit 1 || true
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # Abort migration to other pool
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # The same but abort using destination image
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/test2
+ rbd migration abort rbd2/test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ test $format = 1 && continue
+
+ # Abort migration to other namespace
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/ns1/test3
+ rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/ns1/test3
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+ done
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_config() {
+ echo "testing config..."
+ remove_images
+
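+    # config settings are layered: global entity levels (global < client <
+    # client.<id>), then pool, then image; the Source column of
+    # `rbd config ... list` shows which level supplied each value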
+ expect_fail rbd config global set osd rbd_cache true
+ expect_fail rbd config global set global debug_ms 10
+ expect_fail rbd config global set global rbd_UNKNOWN false
+ expect_fail rbd config global set global rbd_cache INVALID
+ rbd config global set global rbd_cache false
+ rbd config global set client rbd_cache true
+ rbd config global set client.123 rbd_cache false
+ rbd config global get global rbd_cache | grep '^false$'
+ rbd config global get client rbd_cache | grep '^true$'
+ rbd config global get client.123 rbd_cache | grep '^false$'
+ expect_fail rbd config global get client.UNKNOWN rbd_cache
+ rbd config global list global | grep '^rbd_cache * false * global *$'
+ rbd config global list client | grep '^rbd_cache * true * client *$'
+ rbd config global list client.123 | grep '^rbd_cache * false * client.123 *$'
+ rbd config global list client.UNKNOWN | grep '^rbd_cache * true * client *$'
+ rbd config global rm client rbd_cache
+ expect_fail rbd config global get client rbd_cache
+ rbd config global list client | grep '^rbd_cache * false * global *$'
+ rbd config global rm client.123 rbd_cache
+ rbd config global rm global rbd_cache
+
+ rbd config pool set rbd rbd_cache true
+ rbd config pool list rbd | grep '^rbd_cache * true * pool *$'
+ rbd config pool get rbd rbd_cache | grep '^true$'
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+
+ rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
+ rbd config image set rbd/test1 rbd_cache false
+ rbd config image list rbd/test1 | grep '^rbd_cache * false * image *$'
+ rbd config image get rbd/test1 rbd_cache | grep '^false$'
+ rbd config image remove rbd/test1 rbd_cache
+ expect_fail rbd config image get rbd/test1 rbd_cache
+ rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
+
+ rbd config pool remove rbd rbd_cache
+ expect_fail rbd config pool get rbd rbd_cache
+ rbd config pool list rbd | grep '^rbd_cache * true * config *$'
+
+ rbd rm test1
+}
+
+test_trash_purge_schedule() {
+ echo "testing trash purge schedule..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns1
+
+ test "$(ceph rbd trash purge schedule list)" = "{}"
+ ceph rbd trash purge schedule status | fgrep '"scheduled": []'
+
+ expect_fail rbd trash purge schedule ls
+ test "$(rbd trash purge schedule ls -R --format json)" = "[]"
+
+ rbd trash purge schedule add -p rbd 1d 01:30
+
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ expect_fail rbd trash purge schedule ls
+ rbd trash purge schedule ls -R | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule ls -R -p rbd | grep 'every 1d starting at 01:30'
+ expect_fail rbd trash purge schedule ls -p rbd2
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
+
+ rbd trash purge schedule add -p rbd2/ns1 2d
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" != "[]"
+ rbd trash purge schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *every 2d'
+ rbd trash purge schedule rm -p rbd2/ns1
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
+
+ for i in `seq 12`; do
+ test "$(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd' && break
+ sleep 10
+ done
+ rbd trash purge schedule status
+ test "$(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+ test "$(rbd trash purge schedule status -p rbd --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+
+ rbd trash purge schedule add 2d 00:17
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls -R | grep 'every 2d starting at 00:17'
+ expect_fail rbd trash purge schedule ls -p rbd2
+ rbd trash purge schedule ls -p rbd2 -R | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls -p rbd2/ns1 -R | grep 'every 2d starting at 00:17'
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/pool')" = "-"
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/namespace')" = "-"
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items/item/start_time')" = "00:17:00"
+
+ for i in `seq 12`; do
+ rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2' && break
+ sleep 10
+ done
+ rbd trash purge schedule status
+ rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2'
+ echo $(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool') | grep 'rbd rbd2 rbd2'
+ test "$(rbd trash purge schedule status -p rbd --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+ test "$(echo $(rbd trash purge schedule status -p rbd2 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool'))" = 'rbd2 rbd2'
+
+ test "$(echo $(rbd trash purge schedule ls -R --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items'))" = "2d00:17:00 1d01:30:00"
+
+ rbd trash purge schedule add 1d
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls | grep 'every 1d'
+
+ rbd trash purge schedule ls -R --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items' | grep '2d00:17'
+
+ rbd trash purge schedule rm 1d
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule rm 2d 00:17
+ expect_fail rbd trash purge schedule ls
+
+ for p in rbd2 rbd2/ns1; do
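+        # a purge schedule added at the pool level also covers the pool's
+        # namespaces, so both levels should empty rbd2/ns1's trash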
+ rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
+ rbd trash mv rbd2/ns1/test1
+ rbd trash ls rbd2/ns1 | wc -l | grep '^1$'
+
+ rbd trash purge schedule add -p $p 1m
+ rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
+ rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
+
+ for i in `seq 12`; do
+ rbd trash ls rbd2/ns1 | wc -l | grep '^1$' || break
+ sleep 10
+ done
+ rbd trash ls rbd2/ns1 | wc -l | grep '^0$'
+
+        # repeat after the schedule has kicked in; see https://tracker.ceph.com/issues/53915
+ rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
+ rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
+
+ rbd trash purge schedule status | grep 'rbd2 *ns1'
+ rbd trash purge schedule status -p rbd2 | grep 'rbd2 *ns1'
+ rbd trash purge schedule status -p rbd2/ns1 | grep 'rbd2 *ns1'
+
+ rbd trash purge schedule rm -p $p 1m
+ done
+
+ # Negative tests
+ rbd trash purge schedule add 2m
+ expect_fail rbd trash purge schedule add -p rbd dummy
+ expect_fail rbd trash purge schedule add dummy
+ expect_fail rbd trash purge schedule remove -p rbd dummy
+ expect_fail rbd trash purge schedule remove dummy
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule ls | grep 'every 2m'
+ rbd trash purge schedule remove -p rbd 1d 01:30
+ rbd trash purge schedule remove 2m
+ test "$(rbd trash purge schedule ls -R --format json)" = "[]"
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_trash_purge_schedule_recovery() {
+    echo "testing recovery of the trash_purge_schedule handler after the module's RADOS client is blocklisted..."
+ remove_images
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns1
+
+ rbd trash purge schedule add -p rbd3/ns1 2d
+ rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
+
+ # Fetch and blocklist the rbd_support module's RADOS client
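+    # blocklist entries are keyed as "<addr>/<nonce>", hence the jq
+    # concatenation of addr, "/" and nonce below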
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ # Check that you can add a trash purge schedule after a few retries
+ expect_fail rbd trash purge schedule add -p rbd3 10m
+ sleep 10
+ for i in `seq 24`; do
+ rbd trash purge schedule add -p rbd3 10m && break
+ sleep 10
+ done
+
+ rbd trash purge schedule ls -p rbd3 -R | grep 'every 10m'
+ # Verify that the schedule present before client blocklisting is preserved
+ rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
+
+ rbd trash purge schedule remove -p rbd3 10m
+ rbd trash purge schedule remove -p rbd3/ns1 2d
+ rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'every 10m'
+ rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'rbd3 *ns1 *every 2d'
+
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+}
+
+test_mirror_snapshot_schedule() {
+ echo "testing mirror snapshot schedule..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns1
+
+ rbd mirror pool enable rbd2 image
+ rbd mirror pool enable rbd2/ns1 image
+ rbd mirror pool peer add rbd2 cluster1
+
+ test "$(ceph rbd mirror snapshot schedule list)" = "{}"
+ ceph rbd mirror snapshot schedule status | fgrep '"scheduled_images": []'
+
+ expect_fail rbd mirror snapshot schedule ls
+ test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
+
+ rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" = '0'
+
+ rbd mirror image enable rbd2/ns1/test1 snapshot
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" = '1'
+
+ rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 1m
+ expect_fail rbd mirror snapshot schedule ls
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ for i in `seq 12`; do
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" -gt '1' && break
+ sleep 10
+ done
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" -gt '1'
+
+    # repeat after the schedule has kicked in; see https://tracker.ceph.com/issues/53915
+ expect_fail rbd mirror snapshot schedule ls
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ rbd mirror snapshot schedule status
+ test "$(rbd mirror snapshot schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --image test1 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+
+ rbd mirror image demote rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
+
+ rbd mirror image promote rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' && break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1'
+
+ rbd mirror snapshot schedule add 1h 00:15
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ # Negative tests
+ expect_fail rbd mirror snapshot schedule add dummy
+ expect_fail rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 dummy
+ expect_fail rbd mirror snapshot schedule remove dummy
+ expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 dummy
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ rbd rm rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
+
+ rbd mirror snapshot schedule remove
+ test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_mirror_snapshot_schedule_recovery() {
+    echo "testing recovery of the mirror snapshot scheduler after the module's RADOS client is blocklisted..."
+ remove_images
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns1
+
+ rbd mirror pool enable rbd3 image
+ rbd mirror pool enable rbd3/ns1 image
+ rbd mirror pool peer add rbd3 cluster1
+
+ rbd create $RBD_CREATE_ARGS -s 1 rbd3/ns1/test1
+ rbd mirror image enable rbd3/ns1/test1 snapshot
+ test "$(rbd mirror image status rbd3/ns1/test1 |
+ grep -c mirror.primary)" = '1'
+
+ rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 1m
+ test "$(rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1)" = 'every 1m'
+
+    # Fetch and blocklist the rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ # Check that you can add a mirror snapshot schedule after a few retries
+ expect_fail rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m
+ sleep 10
+ for i in `seq 24`; do
+ rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m && break
+ sleep 10
+ done
+
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 2m'
+ # Verify that the schedule present before client blocklisting is preserved
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 1m'
+
+ rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 2m
+ rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 1m
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 2m'
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 1m'
+
+ rbd snap purge rbd3/ns1/test1
+ rbd rm rbd3/ns1/test1
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+}
+
+test_perf_image_iostat() {
+ echo "testing perf image iostat..."
+ remove_images
+
+ ceph osd pool create rbd1 8
+ rbd pool init rbd1
+ rbd namespace create rbd1/ns
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns
+
+ IMAGE_SPECS=("test1" "rbd1/test2" "rbd1/ns/test3" "rbd2/test4" "rbd2/ns/test5")
+ for spec in "${IMAGE_SPECS[@]}"; do
+ # ensure all images are created without a separate data pool
+ # as we filter iostat by specific pool specs below
+ rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
+ done
+
+ BENCH_PIDS=()
+ for spec in "${IMAGE_SPECS[@]}"; do
+ rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
+ --rbd-cache false $spec >/dev/null 2>&1 &
+ BENCH_PIDS+=($!)
+ done
+
+ # test specifying pool spec via spec syntax
+ test "$(rbd perf image iostat --format json rbd1 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test2'
+ test "$(rbd perf image iostat --format json rbd1/ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test3'
+ test "$(rbd perf image iostat --format json --rbd-default-pool rbd1 /ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test3'
+
+ # test specifying pool spec via options
+ test "$(rbd perf image iostat --format json --pool rbd2 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test4'
+ test "$(rbd perf image iostat --format json --pool rbd2 --namespace ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test5'
+ test "$(rbd perf image iostat --format json --rbd-default-pool rbd2 --namespace ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test5'
+
+ # test omitting pool spec (-> GLOBAL_POOL_KEY)
+ test "$(rbd perf image iostat --format json |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test1 test2 test3 test4 test5'
+
+ for pid in "${BENCH_PIDS[@]}"; do
+ kill $pid
+ done
+ wait
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
+}
+
+test_perf_image_iostat_recovery() {
+    echo "testing recovery of the perf handler after the module's RADOS client is blocklisted..."
+ remove_images
+
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns
+
+ IMAGE_SPECS=("rbd3/test1" "rbd3/ns/test2")
+ for spec in "${IMAGE_SPECS[@]}"; do
+ # ensure all images are created without a separate data pool
+ # as we filter iostat by specific pool specs below
+ rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
+ done
+
+ BENCH_PIDS=()
+ for spec in "${IMAGE_SPECS[@]}"; do
+ rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
+ --rbd-cache false $spec >/dev/null 2>&1 &
+ BENCH_PIDS+=($!)
+ done
+
+ test "$(rbd perf image iostat --format json rbd3 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test1'
+
+ # Fetch and blocklist the rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ expect_fail rbd perf image iostat --format json rbd3/ns
+ sleep 10
+ for i in `seq 24`; do
+ test "$(rbd perf image iostat --format json rbd3/ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test2' && break
+ sleep 10
+ done
+
+ for pid in "${BENCH_PIDS[@]}"; do
+ kill $pid
+ done
+ wait
+
+ remove_images
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+}
+
+test_mirror_pool_peer_bootstrap_create() {
+ echo "testing mirror pool peer bootstrap create..."
+ remove_images
+
+ ceph osd pool create rbd1 8
+ rbd pool init rbd1
+ rbd mirror pool enable rbd1 image
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd mirror pool enable rbd2 pool
+
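+    # `ceph mon dump` lists monitors as "<rank>: <addrs> mon.<name>"; the
+    # sed below strips the rank and name, keeping just the addresses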
+ readarray -t MON_ADDRS < <(ceph mon dump |
+ sed -n 's/^[0-9]: \(.*\) mon\.[a-z]$/\1/p')
+
+ # check that all monitors make it to the token even if only one
+ # valid monitor is specified
+ BAD_MON_ADDR="1.2.3.4:6789"
+ MON_HOST="${MON_ADDRS[0]},$BAD_MON_ADDR"
+ TOKEN="$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd1 | base64 -d)"
+ TOKEN_FSID="$(jq -r '.fsid' <<< "$TOKEN")"
+ TOKEN_CLIENT_ID="$(jq -r '.client_id' <<< "$TOKEN")"
+ TOKEN_KEY="$(jq -r '.key' <<< "$TOKEN")"
+ TOKEN_MON_HOST="$(jq -r '.mon_host' <<< "$TOKEN")"
+
+ test "$TOKEN_FSID" = "$(ceph fsid)"
+ test "$TOKEN_KEY" = "$(ceph auth get-key client.$TOKEN_CLIENT_ID)"
+ for addr in "${MON_ADDRS[@]}"; do
+ fgrep "$addr" <<< "$TOKEN_MON_HOST"
+ done
+ expect_fail fgrep "$BAD_MON_ADDR" <<< "$TOKEN_MON_HOST"
+
+ # check that the token does not change, including across pools
+ test "$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd1 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ rbd1 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd2 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ rbd2 | base64 -d)" = "$TOKEN"
+
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
+}
+
+test_tasks_removed_pool() {
+ echo "testing removing pool under running tasks..."
+ remove_images
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ rbd create $RBD_CREATE_ARGS --size 1G foo
+ rbd snap create foo@snap
+ rbd snap protect foo@snap
+ rbd clone foo@snap bar
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd2/dummy
+ rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/dummy
+ rbd snap create rbd2/dummy@snap
+ rbd snap protect rbd2/dummy@snap
+ for i in {1..5}; do
+ rbd clone rbd2/dummy@snap rbd2/dummy$i
+ done
+
+    # queue flatten tasks on a few dummy images, then remove the pool
+ test "$(ceph rbd task list)" = "[]"
+ for i in {1..5}; do
+ ceph rbd task add flatten rbd2/dummy$i
+ done
+ ceph osd pool delete rbd2 rbd2 --yes-i-really-really-mean-it
+ test "$(ceph rbd task list)" != "[]"
+
+ # queue flatten on another image and check that it completes
+ rbd info bar | grep 'parent: '
+ expect_fail rbd snap unprotect foo@snap
+ ceph rbd task add flatten bar
+ for i in {1..12}; do
+ rbd info bar | grep 'parent: ' || break
+ sleep 10
+ done
+ rbd info bar | expect_fail grep 'parent: '
+ rbd snap unprotect foo@snap
+
+ # check that flattens disrupted by pool removal are cleaned up
+ for i in {1..12}; do
+ test "$(ceph rbd task list)" = "[]" && break
+ sleep 10
+ done
+ test "$(ceph rbd task list)" = "[]"
+
+ remove_images
+}
+
+test_tasks_recovery() {
+    echo "testing task handler recovery after the module's RADOS client is blocklisted..."
+ remove_images
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd2/img1
+ rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/img1
+ rbd snap create rbd2/img1@snap
+ rbd snap protect rbd2/img1@snap
+ rbd clone rbd2/img1@snap rbd2/clone1
+
+    # Fetch and blocklist the rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ expect_fail ceph rbd task add flatten rbd2/clone1
+ sleep 10
+ for i in `seq 24`; do
+ ceph rbd task add flatten rbd2/clone1 && break
+ sleep 10
+ done
+ test "$(ceph rbd task list)" != "[]"
+
+ for i in {1..12}; do
+ rbd info rbd2/clone1 | grep 'parent: ' || break
+ sleep 10
+ done
+ rbd info rbd2/clone1 | expect_fail grep 'parent: '
+ rbd snap unprotect rbd2/img1@snap
+
+ test "$(ceph rbd task list)" = "[]"
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_pool_image_args
+test_rename
+test_ls
+test_remove
+test_migration
+test_config
+RBD_CREATE_ARGS=""
+test_others
+test_locking
+test_thick_provision
+RBD_CREATE_ARGS="--image-format 2"
+test_others
+test_locking
+test_clone
+test_trash
+test_purge
+test_deep_copy_clone
+test_clone_v2
+test_thick_provision
+test_namespace
+test_trash_purge_schedule
+test_trash_purge_schedule_recovery
+test_mirror_snapshot_schedule
+test_mirror_snapshot_schedule_recovery
+test_perf_image_iostat
+test_perf_image_iostat_recovery
+test_mirror_pool_peer_bootstrap_create
+test_tasks_removed_pool
+test_tasks_recovery
+
+echo OK
diff --git a/qa/workunits/rbd/cli_migration.sh b/qa/workunits/rbd/cli_migration.sh
new file mode 100755
index 000000000..be8e031fd
--- /dev/null
+++ b/qa/workunits/rbd/cli_migration.sh
@@ -0,0 +1,357 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+TEMPDIR=
+IMAGE1=image1
+IMAGE2=image2
+IMAGE3=image3
+IMAGES="${IMAGE1} ${IMAGE2} ${IMAGE3}"
+
+cleanup() {
+ cleanup_tempdir
+ remove_images
+}
+
+setup_tempdir() {
+ TEMPDIR=`mktemp -d`
+}
+
+cleanup_tempdir() {
+ rm -rf ${TEMPDIR}
+}
+
+create_base_image() {
+ local image=$1
+
+ rbd create --size 1G ${image}
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 256M ${image}
+ rbd snap create ${image}@1
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 64M ${image}
+ rbd snap create ${image}@2
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 128M ${image}
+}
+
+export_raw_image() {
+ local image=$1
+
+ rm -rf "${TEMPDIR}/${image}"
+ rbd export ${image} "${TEMPDIR}/${image}"
+}
+
+export_base_image() {
+ local image=$1
+
+ export_raw_image "${image}"
+ export_raw_image "${image}@1"
+ export_raw_image "${image}@2"
+}
+
+remove_image() {
+ local image=$1
+
+ (rbd migration abort $image || true) >/dev/null 2>&1
+ (rbd snap purge $image || true) >/dev/null 2>&1
+ (rbd rm $image || true) >/dev/null 2>&1
+}
+
+remove_images() {
+ for image in ${IMAGES}
+ do
+ remove_image ${image}
+ done
+}
+
+show_diff() {
+ local file1=$1
+ local file2=$2
+
+ xxd "${file1}" > "${file1}.xxd"
+ xxd "${file2}" > "${file2}.xxd"
+ sdiff -s "${file1}.xxd" "${file2}.xxd" | head -n 64
+ rm -f "${file1}.xxd" "${file2}.xxd"
+}
+
+compare_images() {
+ local src_image=$1
+ local dst_image=$2
+ local ret=0
+
+ export_raw_image ${dst_image}
+ if ! cmp "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
+ then
+ show_diff "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
+ ret=1
+ fi
+ return ${ret}
+}
+
+test_import_native_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ rbd migration prepare --import-only "rbd/${base_image}@2" ${dest_image}
+ rbd migration abort ${dest_image}
+
+ local pool_id=$(ceph osd pool ls detail --format xml | xmlstarlet sel -t -v "//pools/pool[pool_name='rbd']/pool_id")
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "native",
+ "pool_id": ${pool_id},
+ "pool_namespace": "",
+ "image_name": "${base_image}",
+ "snap_name": "2"
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec "{\"type\": \"native\", \"pool_id\": "${pool_id}", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
+ ${dest_image}
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec "{\"type\": \"native\", \"pool_name\": \"rbd\", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
+ ${dest_image}
+ rbd migration execute ${dest_image}
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ remove_image "${dest_image}"
+}
+
+test_import_qcow_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ if ! qemu-img convert -f raw -O qcow rbd:rbd/${base_image} ${TEMPDIR}/${base_image}.qcow; then
+ echo "skipping QCOW test"
+ return 0
+ fi
+ qemu-img info -f qcow ${TEMPDIR}/${base_image}.qcow
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "qcow",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}.qcow"
+ }
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ set +e
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ local error_code=$?
+ set -e
+
+ if [ $error_code -eq 95 ]; then
+ echo "skipping QCOW test (librbd support disabled)"
+ return 0
+ fi
+ test $error_code -eq 0
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ remove_image "${dest_image}"
+}
+
+test_import_qcow2_format() {
+ local base_image=$1
+ local dest_image=$2
+
+    # create a new image via qemu-img and its bench tool, since we cannot
+ # import snapshot deltas into QCOW2
+ qemu-img create -f qcow2 ${TEMPDIR}/${base_image}.qcow2 1G
+
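+    # each cycle below writes a distinct byte pattern (-S randomizes the
+    # stride between 4K requests), dumps the current image state to a raw
+    # reference file and, for the first two cycles, records a qcow2
+    # internal snapshot of that state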
+ qemu-img bench -f qcow2 -w -c 65536 -d 16 --pattern 65 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ "${TEMPDIR}/${base_image}@snap1"
+ qemu-img snapshot -c "snap1" ${TEMPDIR}/${base_image}.qcow2
+
+ qemu-img bench -f qcow2 -w -c 16384 -d 16 --pattern 66 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ "${TEMPDIR}/${base_image}@snap2"
+ qemu-img snapshot -c "snap2" ${TEMPDIR}/${base_image}.qcow2
+
+ qemu-img bench -f qcow2 -w -c 32768 -d 16 --pattern 67 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ ${TEMPDIR}/${base_image}
+
+ qemu-img info -f qcow2 ${TEMPDIR}/${base_image}.qcow2
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "qcow",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}.qcow2"
+ }
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ remove_image "${dest_image}"
+}
+
+test_import_raw_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "raw",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}"
+ }
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ cat ${TEMPDIR}/spec.json | rbd migration prepare --import-only \
+ --source-spec-path - ${dest_image}
+ compare_images ${base_image} ${dest_image}
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ rbd migration execute ${dest_image}
+ rbd migration commit ${dest_image}
+
+ compare_images ${base_image} ${dest_image}
+
+ remove_image "${dest_image}"
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "raw",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}"
+ },
+ "snapshots": [{
+ "type": "raw",
+ "name": "snap1",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}@1"
+ }
+ }, {
+ "type": "raw",
+ "name": "snap2",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}@2"
+ }
+ }]
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
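+    # snapshot the imported HEAD before dirtying it so the original
+    # contents can still be compared via ${dest_image}@head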
+ rbd snap create ${dest_image}@head
+ rbd bench --io-type write --io-pattern rand --io-size=32K --io-total=32M ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}@head"
+ compare_images "${base_image}@1" "${dest_image}@snap1"
+ compare_images "${base_image}@2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}@head"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@snap1"
+ compare_images "${base_image}@2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}@head"
+
+ rbd migration commit ${dest_image}
+
+ remove_image "${dest_image}"
+}
+
+# make sure the rbd pool is EMPTY... this is a test script!!
+rbd ls 2>&1 | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
+
+setup_tempdir
+trap 'cleanup $?' INT TERM EXIT
+
+create_base_image ${IMAGE1}
+export_base_image ${IMAGE1}
+
+test_import_native_format ${IMAGE1} ${IMAGE2}
+test_import_qcow_format ${IMAGE1} ${IMAGE2}
+test_import_qcow2_format ${IMAGE2} ${IMAGE3}
+test_import_raw_format ${IMAGE1} ${IMAGE2}
+
+echo OK
diff --git a/qa/workunits/rbd/concurrent.sh b/qa/workunits/rbd/concurrent.sh
new file mode 100755
index 000000000..abaad75f5
--- /dev/null
+++ b/qa/workunits/rbd/concurrent.sh
@@ -0,0 +1,375 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2013 Inktank Storage, Inc.
+#
+# This is free software; see the source for copying conditions.
+# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE.
+#
+# This is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as
+# published by the Free Software Foundation version 2.
+
+# Alex Elder <elder@inktank.com>
+# January 29, 2013
+
+################################################################
+
+# The purpose of this test is to exercise paths through the rbd
+# code, making sure no bad pointer references or invalid reference
+# count operations occur in the face of concurrent activity.
+#
+# Each pass of the test creates an rbd image, maps it, and writes
+# some data into the image. It also reads some data from all of the
+# other images that exist at the time the pass executes. Finally,
+# the image is unmapped and removed. The image removal completes in
+# the background.
+#
+# An iteration of the test consists of performing some number of
+# passes, initiating each pass as a background job, and finally
+# sleeping for a variable delay. The delay is initially a specified
+# value, but each iteration shortens that proportionally, such that
+# the last iteration will not delay at all.
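+# (For example, DELAY=5 with ITER=5 yields sleeps of 4.00, 3.00, 2.00,
+# 1.00 and 0.00 seconds.)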
+#
+# The result exercises concurrent creates and deletes of rbd images,
+# writes to new images, reads from both written and unwritten image
+# data (including reads concurrent with writes), and attempts to
+# unmap images being read.
+
+# Usage: concurrent [-i <iter>] [-c <count>] [-d <delay>]
+#
+# Exit status:
+# 0: success
+# 1: usage error
+# 2: other runtime error
+# 99: argument count error (programming error)
+# 100: getopt error (internal error)
+
+################################################################
+
+set -ex
+
+# Default flag values; RBD_CONCURRENT_ITER names are intended
+# to be used in yaml scripts to pass in alternate values, e.g.:
+# env:
+# RBD_CONCURRENT_ITER: 20
+# RBD_CONCURRENT_COUNT: 5
+# RBD_CONCURRENT_DELAY: 3
+ITER_DEFAULT=${RBD_CONCURRENT_ITER:-100}
+COUNT_DEFAULT=${RBD_CONCURRENT_COUNT:-5}
+DELAY_DEFAULT=${RBD_CONCURRENT_DELAY:-5} # seconds
+
+CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
+CEPH_ID=${CEPH_ID:-admin}
+SECRET_ARGS=""
+if [ "${CEPH_SECRET_FILE}" ]; then
+ SECRET_ARGS="--secret $CEPH_SECRET_FILE"
+fi
+
+################################################################
+
+function setup() {
+ ID_MAX_DIR=$(mktemp -d /tmp/image_max_id.XXXXXX)
+ ID_COUNT_DIR=$(mktemp -d /tmp/image_ids.XXXXXX)
+ NAMES_DIR=$(mktemp -d /tmp/image_names.XXXXXX)
+ SOURCE_DATA=$(mktemp /tmp/source_data.XXXXXX)
+
+ # Use urandom to generate SOURCE_DATA
+ dd if=/dev/urandom of=${SOURCE_DATA} bs=2048 count=66 \
+ >/dev/null 2>&1
+
+ # List of rbd id's *not* created by this script
+ export INITIAL_RBD_IDS=$(ls /sys/bus/rbd/devices)
+
+ # Set up some environment for normal teuthology test setup.
+ # This really should not be necessary but I found it was.
+
+ export CEPH_ARGS=" --name client.0"
+}
+
+function cleanup() {
+ [ ! "${ID_MAX_DIR}" ] && return
+ local id
+ local image
+
+ # Unmap mapped devices
+ for id in $(rbd_ids); do
+ image=$(cat "/sys/bus/rbd/devices/${id}/name")
+ rbd_unmap_image "${id}"
+ rbd_destroy_image "${image}"
+ done
+ # Get any leftover images
+ for image in $(rbd ls 2>/dev/null); do
+ rbd_destroy_image "${image}"
+ done
+ wait
+ sync
+ rm -f "${SOURCE_DATA}"
+ [ -d "${NAMES_DIR}" ] && rmdir "${NAMES_DIR}"
+ echo "Max concurrent rbd image count was $(get_max "${ID_COUNT_DIR}")"
+ rm -rf "${ID_COUNT_DIR}"
+ echo "Max rbd image id was $(get_max "${ID_MAX_DIR}")"
+ rm -rf "${ID_MAX_DIR}"
+}
+
+function get_max() {
+ [ $# -eq 1 ] || exit 99
+ local dir="$1"
+
+ ls -U "${dir}" | sort -n | tail -1
+}
+
+trap cleanup HUP INT QUIT
+
+# print a usage message and quit
+#
+# if a message is supplied, print that first, and then exit
+# with non-zero status
+function usage() {
+ if [ $# -gt 0 ]; then
+ echo "" >&2
+ echo "$@" >&2
+ fi
+
+ echo "" >&2
+ echo "Usage: ${PROGNAME} <options> <tests>" >&2
+ echo "" >&2
+ echo " options:" >&2
+ echo " -h or --help" >&2
+ echo " show this message" >&2
+ echo " -i or --iterations" >&2
+ echo " iteration count (1 or more)" >&2
+ echo " -c or --count" >&2
+ echo " images created per iteration (1 or more)" >&2
+ echo " -d or --delay" >&2
+ echo " maximum delay between iterations" >&2
+ echo "" >&2
+ echo " defaults:" >&2
+ echo "    iterations: ${ITER_DEFAULT}" >&2
+ echo "    count: ${COUNT_DEFAULT}" >&2
+ echo "    delay: ${DELAY_DEFAULT} (seconds)" >&2
+ echo "" >&2
+
+ [ $# -gt 0 ] && exit 1
+
+ exit 0 # a plain --help exits successfully
+}
+
+# parse command line arguments
+function parseargs() {
+ ITER="${ITER_DEFAULT}"
+ COUNT="${COUNT_DEFAULT}"
+ DELAY="${DELAY_DEFAULT}"
+
+ # Short option flags
+ SHORT_OPTS=""
+ SHORT_OPTS="${SHORT_OPTS},h"
+ SHORT_OPTS="${SHORT_OPTS},i:"
+ SHORT_OPTS="${SHORT_OPTS},c:"
+ SHORT_OPTS="${SHORT_OPTS},d:"
+
+ # Long option flags
+ LONG_OPTS=""
+ LONG_OPTS="${LONG_OPTS},help"
+ LONG_OPTS="${LONG_OPTS},iterations:"
+ LONG_OPTS="${LONG_OPTS},count:"
+ LONG_OPTS="${LONG_OPTS},delay:"
+
+ TEMP=$(getopt --name "${PROGNAME}" \
+ --options "${SHORT_OPTS}" \
+ --longoptions "${LONG_OPTS}" \
+ -- "$@")
+ eval set -- "$TEMP"
+
+ while [ "$1" != "--" ]; do
+ case "$1" in
+ -h|--help)
+ usage
+ ;;
+ -i|--iterations)
+ ITER="$2"
+ [ "${ITER}" -lt 1 ] &&
+ usage "bad iterations value"
+ shift
+ ;;
+ -c|--count)
+ COUNT="$2"
+ [ "${COUNT}" -lt 1 ] &&
+ usage "bad count value"
+ shift
+ ;;
+ -d|--delay)
+ DELAY="$2"
+ shift
+ ;;
+ *)
+ exit 100 # Internal error
+ ;;
+ esac
+ shift
+ done
+ shift
+}
+
+function rbd_ids() {
+ [ $# -eq 0 ] || exit 99
+ local ids
+ local i
+
+ [ -d /sys/bus/rbd ] || return
+ ids=" $(echo $(ls /sys/bus/rbd/devices)) "
+ for i in ${INITIAL_RBD_IDS}; do
+ ids=${ids/ ${i} / }
+ done
+ echo ${ids}
+}
+
+function update_maxes() {
+ local ids="$@"
+ local last_id
+ # These aren't 100% safe against concurrent updates, but they
+ # should be pretty close
+ count=$(echo ${ids} | wc -w)
+ touch "${ID_COUNT_DIR}/${count}"
+ last_id=${ids% }
+ last_id=${last_id##* }
+ touch "${ID_MAX_DIR}/${last_id}"
+}
+
+function rbd_create_image() {
+ [ $# -eq 0 ] || exit 99
+ local image=$(basename $(mktemp "${NAMES_DIR}/image.XXXXXX"))
+
+ rbd create "${image}" --size=1024
+ echo "${image}"
+}
+
+function rbd_image_id() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+
+ grep -l "${image}" /sys/bus/rbd/devices/*/name 2>/dev/null |
+ cut -d / -f 6
+}
+
+function rbd_map_image() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+ local id
+
+ sudo rbd map "${image}" --user "${CEPH_ID}" ${SECRET_ARGS} \
+ > /dev/null 2>&1
+
+ id=$(rbd_image_id "${image}")
+ echo "${id}"
+}
+
+function rbd_write_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+ # Offset and size here are meant to ensure beginning and end
+ # cross both (4K or 64K) page and (4MB) rbd object boundaries.
+ # It assumes the SOURCE_DATA file has size 66 * 2048 bytes
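+ # (the write spans bytes 2015 * 2048 = 4126720 through
+ # (2015 + 66) * 2048 = 4261888, crossing the 4 MiB boundary at 4194304)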
+ dd if="${SOURCE_DATA}" of="/dev/rbd${id}" bs=2048 seek=2015 \
+ > /dev/null 2>&1
+}
+
+# All starting and ending offsets here are selected so they are not
+# aligned on a (4 KB or 64 KB) page boundary
+function rbd_read_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+ # First read starting and ending at an offset before any
+ # written data. The osd zero-fills data read from an
+ # existing rbd object, but before any previously-written
+ # data.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=3 \
+ > /dev/null 2>&1
+ # Next read starting at an offset before any written data,
+ # but ending at an offset that includes data that's been
+ # written. The osd zero-fills unwritten data at the
+ # beginning of a read.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=1983 \
+ > /dev/null 2>&1
+ # Read the data at offset 2015 * 2048 bytes (where it was
+ # written) and make sure it matches the original data.
+ cmp --quiet "${SOURCE_DATA}" "/dev/rbd${id}" 0 4126720 ||
+ echo "MISMATCH!!!"
+ # Now read starting within the pre-written data, but ending
+ # beyond it. The rbd client zero-fills the unwritten
+ # portion at the end of a read.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2079 \
+ > /dev/null 2>&1
+ # Now read starting from an unwritten range within a written
+ # rbd object. The rbd client zero-fills this.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2115 \
+ > /dev/null 2>&1
+ # Finally read from an unwritten region which would reside
+ # in a different (non-existent) osd object. The osd client
+ # zero-fills unwritten data when the target object doesn't
+ # exist.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=4098 \
+ > /dev/null 2>&1
+}
+
+function rbd_unmap_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+ sudo rbd unmap "/dev/rbd${id}"
+}
+
+function rbd_destroy_image() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+
+ # Don't wait for it to complete, to increase concurrency
+ rbd rm "${image}" >/dev/null 2>&1 &
+ rm -f "${NAMES_DIR}/${image}"
+}
+
+function one_pass() {
+ [ $# -eq 0 ] || exit 99
+ local image
+ local id
+ local ids
+ local i
+
+ image=$(rbd_create_image)
+ id=$(rbd_map_image "${image}")
+ ids=$(rbd_ids)
+ update_maxes "${ids}"
+ for i in ${ids}; do
+ if [ "${i}" -eq "${id}" ]; then
+ rbd_write_image "${i}"
+ else
+ rbd_read_image "${i}"
+ fi
+ done
+ rbd_unmap_image "${id}"
+ rbd_destroy_image "${image}"
+}
+
+################################################################
+
+parseargs "$@"
+
+setup
+
+for iter in $(seq 1 "${ITER}"); do
+ for count in $(seq 1 "${COUNT}"); do
+ one_pass &
+ done
+ # Sleep longer at first, overlap iterations more later.
+ # Use awk to get sub-second granularity (see sleep(1)).
+ sleep $(echo "${DELAY}" "${iter}" "${ITER}" |
+ awk '{ printf("%.2f\n", $1 - $1 * $2 / $3);}')
+
+done
+wait
+
+cleanup
+
+exit 0
diff --git a/qa/workunits/rbd/crimson/test_crimson_librbd.sh b/qa/workunits/rbd/crimson/test_crimson_librbd.sh
new file mode 100755
index 000000000..fb308de41
--- /dev/null
+++ b/qa/workunits/rbd/crimson/test_crimson_librbd.sh
@@ -0,0 +1,35 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_librbd
+else
+ # Run test cases individually to allow finer-grained selection
+ # during ongoing Crimson development.
+ # Disabled test groups are tracked here:
+ # https://tracker.ceph.com/issues/58791
+ ceph_test_librbd --gtest_filter='TestLibRBD.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/0.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/1.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/2.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/3.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/4.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/5.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/6.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/7.*'
+ # ceph_test_librbd --gtest_filter='DiffIterateTest/0.*'
+ # ceph_test_librbd --gtest_filter='DiffIterateTest/1.*'
+ ceph_test_librbd --gtest_filter='TestImageWatcher.*'
+ ceph_test_librbd --gtest_filter='TestInternal.*'
+ ceph_test_librbd --gtest_filter='TestMirroring.*'
+ # ceph_test_librbd --gtest_filter='TestDeepCopy.*'
+ ceph_test_librbd --gtest_filter='TestGroup.*'
+ # ceph_test_librbd --gtest_filter='TestMigration.*'
+ ceph_test_librbd --gtest_filter='TestMirroringWatcher.*'
+ ceph_test_librbd --gtest_filter='TestObjectMap.*'
+ ceph_test_librbd --gtest_filter='TestOperations.*'
+ ceph_test_librbd --gtest_filter='TestTrash.*'
+ ceph_test_librbd --gtest_filter='TestJournalEntries.*'
+ ceph_test_librbd --gtest_filter='TestJournalReplay.*'
+fi
+exit 0
diff --git a/qa/workunits/rbd/diff.sh b/qa/workunits/rbd/diff.sh
new file mode 100755
index 000000000..fbd6e0642
--- /dev/null
+++ b/qa/workunits/rbd/diff.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+set -ex
+
+function cleanup() {
+ rbd snap purge foo || :
+ rbd rm foo || :
+ rbd snap purge foo.copy || :
+ rbd rm foo.copy || :
+ rbd snap purge foo.copy2 || :
+ rbd rm foo.copy2 || :
+ rm -f foo.diff foo.out
+}
+
+cleanup
+
+rbd create foo --size 1000
+rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
+
+#rbd cp foo foo.copy
+rbd create foo.copy --size 1000
+rbd export-diff foo - | rbd import-diff - foo.copy
+
+rbd snap create foo --snap=two
+rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
+rbd snap create foo --snap=three
+rbd snap create foo.copy --snap=two
+
+rbd export-diff foo@two --from-snap three foo.diff && exit 1 || true # wrong snap order
+rm -f foo.diff
+
+rbd export-diff foo@three --from-snap two foo.diff
+rbd import-diff foo.diff foo.copy
+rbd import-diff foo.diff foo.copy && exit 1 || true # this should fail with EEXIST on the end snap
+rbd snap ls foo.copy | grep three
+
+rbd create foo.copy2 --size 1000
+rbd import-diff foo.diff foo.copy2 && exit 1 || true # this should fail because the start snap doesn't exist
+
+rbd export foo foo.out
+orig=`md5sum foo.out | awk '{print $1}'`
+rm foo.out
+rbd export foo.copy foo.out
+copy=`md5sum foo.out | awk '{print $1}'`
+
+if [ "$orig" != "$copy" ]; then
+ echo does not match
+ exit 1
+fi
+
+cleanup
+
+echo OK
+
diff --git a/qa/workunits/rbd/diff_continuous.sh b/qa/workunits/rbd/diff_continuous.sh
new file mode 100755
index 000000000..fd1785e07
--- /dev/null
+++ b/qa/workunits/rbd/diff_continuous.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+set -ex
+set -o pipefail
+
+function untar_workload() {
+ local i
+ for ((i = 0; i < 10; i++)); do
+ pv -L 10M linux-5.4.tar.gz > "${MOUNT}/linux-5.4.tar.gz"
+ tar -C "${MOUNT}" -xzf "${MOUNT}/linux-5.4.tar.gz"
+ sync "${MOUNT}"
+ rm -rf "${MOUNT}"/linux-5.4*
+ done
+}
+
+function check_object_map() {
+ local spec="$1"
+
+ rbd object-map check "${spec}"
+
+ local flags
+ flags="$(rbd info "${spec}" | grep 'flags: ')"
+ if [[ "${flags}" =~ object\ map\ invalid ]]; then
+ echo "Object map invalid at ${spec}"
+ exit 1
+ fi
+ if [[ "${flags}" =~ fast\ diff\ invalid ]]; then
+ echo "Fast diff invalid at ${spec}"
+ exit 1
+ fi
+}
+
+# RBD_DEVICE_TYPE is intended to be set from yaml, default to krbd
+readonly DEVICE_TYPE="${RBD_DEVICE_TYPE:-krbd}"
+
+BASE_UUID="$(uuidgen)"
+readonly BASE_UUID
+
+readonly SIZE="2G"
+readonly SRC="${BASE_UUID}-src"
+readonly DST="${BASE_UUID}-dst"
+readonly MOUNT="${BASE_UUID}-mnt"
+
+rbd create -s "${SIZE}" --stripe-unit 64K --stripe-count 8 \
+ --image-feature exclusive-lock,object-map,fast-diff "${SRC}"
+rbd create -s "${SIZE}" --object-size 512K "${DST}"
+
+dev="$(sudo rbd device map -t "${DEVICE_TYPE}" "${SRC}")"
+sudo mkfs.ext4 "${dev}"
+mkdir "${MOUNT}"
+sudo mount "${dev}" "${MOUNT}"
+sudo chown "$(whoami)" "${MOUNT}"
+
+# start untar in the background
+wget https://download.ceph.com/qa/linux-5.4.tar.gz
+untar_workload &
+untar_pid=$!
+
+# export initial incremental
+snap_num=1
+rbd snap create "${SRC}@snap${snap_num}"
+rbd export-diff "${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
+
+# keep exporting successive incrementals while untar is running
+while kill -0 "${untar_pid}"; do
+ snap_num=$((snap_num + 1))
+ rbd snap create "${SRC}@snap${snap_num}"
+ sleep $((RANDOM % 4 + 1))
+ rbd export-diff --whole-object --from-snap "snap$((snap_num - 1))" \
+ "${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
+done
+
+sudo umount "${MOUNT}"
+sudo rbd device unmap -t "${DEVICE_TYPE}" "${dev}"
+
+if ! wait "${untar_pid}"; then
+ echo "untar_workload failed"
+ exit 1
+fi
+
+echo "Exported ${snap_num} incrementals"
+if ((snap_num < 30)); then
+ echo "Too few incrementals"
+ exit 1
+fi
+
+# validate
+for ((i = 1; i <= snap_num; i++)); do
+ rbd import-diff "${BASE_UUID}@snap${i}.diff" "${DST}"
+ src_sum="$(rbd export "${SRC}@snap${i}" - | md5sum | awk '{print $1}')"
+ dst_sum="$(rbd export "${DST}@snap${i}" - | md5sum | awk '{print $1}')"
+ if [[ "${src_sum}" != "${dst_sum}" ]]; then
+ echo "Mismatch at snap${i}: ${src_sum} != ${dst_sum}"
+ exit 1
+ fi
+ check_object_map "${SRC}@snap${i}"
+ # FIXME: this reproduces http://tracker.ceph.com/issues/37876
+ # there is no fstrim involved but "rbd import-diff" can produce
+ # write-zeroes requests which turn into discards under the hood
+ # actual: EXISTS, expected: EXISTS_CLEAN inconsistency is harmless
+ # from a data integrity POV and data is validated above regardless,
+ # so just waive it for now
+ #check_object_map "${DST}@snap${i}"
+done
+
+echo OK
diff --git a/qa/workunits/rbd/huge-tickets.sh b/qa/workunits/rbd/huge-tickets.sh
new file mode 100755
index 000000000..22853c07a
--- /dev/null
+++ b/qa/workunits/rbd/huge-tickets.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# This is a test for http://tracker.ceph.com/issues/8979 and the fallout
+# from triaging it. #8979 itself was random crashes on corrupted memory
+# due to a buffer overflow (for tickets larger than 256 bytes), further
+# inspection showed that vmalloced tickets weren't handled correctly as
+# well.
+#
+# What we are doing here is generating three huge keyrings and feeding
+# them to libceph (through 'rbd map' on a scratch image). Bad kernels
+# will crash reliably either on corrupted memory somewhere or a bad page
+# fault in scatterwalk_pagedone().
+
+set -ex
+
+function generate_keyring() {
+ local user=$1
+ local n=$2
+
+ ceph-authtool -C -n client.$user --cap mon 'allow *' --gen-key /tmp/keyring-$user
+
+ set +x # don't pollute trace with echos
+ echo -en "\tcaps osd = \"allow rwx pool=rbd" >>/tmp/keyring-$user
+ for i in $(seq 1 $n); do
+ echo -n ", allow rwx pool=pool$i" >>/tmp/keyring-$user
+ done
+ echo "\"" >>/tmp/keyring-$user
+ set -x
+}
+
+generate_keyring foo 1000 # ~25K, kmalloc
+generate_keyring bar 20000 # ~500K, vmalloc
+generate_keyring baz 300000 # ~8M, vmalloc + sg chaining
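+# (each appended "allow rwx pool=poolN" clause is roughly 25 bytes, which
+# is where the ~25K/~500K/~8M estimates above come from)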
+
+rbd create --size 1 test
+
+for user in {foo,bar,baz}; do
+ ceph auth import -i /tmp/keyring-$user
+ DEV=$(sudo rbd map -n client.$user --keyring /tmp/keyring-$user test)
+ sudo rbd unmap $DEV
+done
diff --git a/qa/workunits/rbd/image_read.sh b/qa/workunits/rbd/image_read.sh
new file mode 100755
index 000000000..ddca8356e
--- /dev/null
+++ b/qa/workunits/rbd/image_read.sh
@@ -0,0 +1,680 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2013 Inktank Storage, Inc.
+#
+# This is free software; see the source for copying conditions.
+# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE.
+#
+# This is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as
+# published by the Free Software Foundation version 2.
+
+# Alex Elder <elder@inktank.com>
+# April 10, 2013
+
+################################################################
+
+# The purpose of this test is to validate that data read from a
+# mapped rbd image is what it's expected to be.
+#
+# By default it creates an image and fills it with some data. It
+# then reads back the data at a series of offsets known to cover
+# various situations (such as reading the beginning, end, or the
+# entirety of an object, or doing a read that spans multiple
+# objects), and stashes the results in a set of local files.
+#
+# It also creates and maps a snapshot of the original image after
+# it's been filled, and reads back the same ranges of data from the
+# snapshot. It then compares the data read back with what was read
+# back from the original image, verifying they match.
+#
+# Clone functionality is tested as well, in which case a clone is
+# made of the snapshot, and the same ranges of data are again read
+# and compared with the original. In addition, a snapshot of that
+# clone is created, and a clone of *that* snapshot is put through
+# the same set of tests. (Clone testing can be optionally skipped.)
+
+################################################################
+
+# Default parameter values. Environment variables, if set, will
+# supersede these defaults.  Such variables have names that begin
+# with "IMAGE_READ_", e.g. use IMAGE_READ_PAGE_SIZE=65536
+# to use 65536 as the page size.
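+# A hypothetical invocation overriding two of them:
+#   IMAGE_READ_OBJECT_ORDER=20 IMAGE_READ_PAGE_SIZE=65536 ./image_read.sh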
+set -e
+
+DEFAULT_VERBOSE=true
+DEFAULT_TEST_CLONES=true
+DEFAULT_LOCAL_FILES=false
+DEFAULT_FORMAT=2
+DEFAULT_DOUBLE_ORDER=true
+DEFAULT_HALF_ORDER=false
+DEFAULT_PAGE_SIZE=4096
+DEFAULT_OBJECT_ORDER=22
+MIN_OBJECT_ORDER=12 # technically 9, but the rbd CLI enforces 12
+MAX_OBJECT_ORDER=32
+
+export RBD_FORCE_ALLOW_V1=1
+
+PROGNAME=$(basename $0)
+
+ORIGINAL=original-$$
+SNAP1=snap1-$$
+CLONE1=clone1-$$
+SNAP2=snap2-$$
+CLONE2=clone2-$$
+
+function err() {
+ if [ $# -gt 0 ]; then
+ echo "${PROGNAME}: $@" >&2
+ fi
+ exit 2
+}
+
+function usage() {
+ if [ $# -gt 0 ]; then
+ echo "" >&2
+ echo "${PROGNAME}: $@" >&2
+ fi
+ echo "" >&2
+ echo "Usage: ${PROGNAME} [<options>]" >&2
+ echo "" >&2
+ echo "options are:" >&2
+ echo " -o object_order" >&2
+ echo " must be ${MIN_OBJECT_ORDER}..${MAX_OBJECT_ORDER}" >&2
+ echo " -p page_size (in bytes)" >&2
+ echo " note: there must be at least 4 pages per object" >&2
+ echo " -1" >&2
+ echo "        test using format 1 rbd images" >&2
+ echo " -2" >&2
+ echo "        test using format 2 rbd images (default)" >&2
+ echo " -c" >&2
+ echo " also test rbd clone images (implies format 2)" >&2
+ echo " -d" >&2
+ echo " clone object order double its parent's (format 2)" >&2
+ echo " -h" >&2
+ echo " clone object order half of its parent's (format 2)" >&2
+ echo " -l" >&2
+ echo " use local files rather than rbd images" >&2
+ echo " -v" >&2
+ echo " disable reporting of what's going on" >&2
+ echo "" >&2
+ exit 1
+}
+
+function verbose() {
+ [ "${VERBOSE}" = true ] && echo "$@"
+ true # Don't let the verbose test spoil our return value
+}
+
+function quiet() {
+ "$@" 2> /dev/null
+}
+
+function boolean_toggle() {
+ [ $# -eq 1 ] || exit 99
+ test "$1" = "true" && echo false || echo true
+}
+
+function parseargs() {
+ local opts="o:p:12clv"
+ local lopts="order:,page_size:,local,clone,verbose"
+ local parsed
+ local clone_order_msg
+
+ # use values from environment if available
+ VERBOSE="${IMAGE_READ_VERBOSE:-${DEFAULT_VERBOSE}}"
+ TEST_CLONES="${IMAGE_READ_TEST_CLONES:-${DEFAULT_TEST_CLONES}}"
+ LOCAL_FILES="${IMAGE_READ_LOCAL_FILES:-${DEFAULT_LOCAL_FILES}}"
+ DOUBLE_ORDER="${IMAGE_READ_DOUBLE_ORDER:-${DEFAULT_DOUBLE_ORDER}}"
+ HALF_ORDER="${IMAGE_READ_HALF_ORDER:-${DEFAULT_HALF_ORDER}}"
+ FORMAT="${IMAGE_READ_FORMAT:-${DEFAULT_FORMAT}}"
+ PAGE_SIZE="${IMAGE_READ_PAGE_SIZE:-${DEFAULT_PAGE_SIZE}}"
+ OBJECT_ORDER="${IMAGE_READ_OBJECT_ORDER:-${DEFAULT_OBJECT_ORDER}}"
+
+ parsed=$(getopt -o "${opts}" -l "${lopts}" -n "${PROGNAME}" -- "$@") ||
+ usage
+ eval set -- "${parsed}"
+ while true; do
+ case "$1" in
+ -v|--verbose)
+ VERBOSE=$(boolean_toggle "${VERBOSE}");;
+ -c|--clone)
+ TEST_CLONES=$(boolean_toggle "${TEST_CLONES}");;
+ -d|--double)
+ DOUBLE_ORDER=$(boolean_toggle "${DOUBLE_ORDER}");;
+ -h|--half)
+ HALF_ORDER=$(boolean_toggle "${HALF_ORDER}");;
+ -l|--local)
+ LOCAL_FILES=$(boolean_toggle "${LOCAL_FILES}");;
+ -1|-2)
+ FORMAT="${1:1}";;
+ -p|--page_size)
+ PAGE_SIZE="$2"; shift;;
+ -o|--order)
+ OBJECT_ORDER="$2"; shift;;
+ --)
+ shift; break;;
+ *)
+ err "getopt internal error"
+ esac
+ shift
+ done
+ [ $# -gt 0 ] && usage "excess arguments ($*)"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ # If we're using different object orders for clones,
+ # make sure the limits are updated accordingly. If
+ # both "half" and "double" are specified, just
+ # ignore them both.
+ if [ "${DOUBLE_ORDER}" = true ]; then
+ if [ "${HALF_ORDER}" = true ]; then
+ DOUBLE_ORDER=false
+ HALF_ORDER=false
+ else
+ ((MAX_OBJECT_ORDER -= 2))
+ fi
+ elif [ "${HALF_ORDER}" = true ]; then
+ ((MIN_OBJECT_ORDER += 2))
+ fi
+ fi
+
+ [ "${OBJECT_ORDER}" -lt "${MIN_OBJECT_ORDER}" ] &&
+ usage "object order (${OBJECT_ORDER}) must be" \
+ "at least ${MIN_OBJECT_ORDER}"
+ [ "${OBJECT_ORDER}" -gt "${MAX_OBJECT_ORDER}" ] &&
+ usage "object order (${OBJECT_ORDER}) must be" \
+ "at most ${MAX_OBJECT_ORDER}"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ if [ "${DOUBLE_ORDER}" = true ]; then
+ ((CLONE1_ORDER = OBJECT_ORDER + 1))
+ ((CLONE2_ORDER = OBJECT_ORDER + 2))
+ clone_order_msg="double"
+ elif [ "${HALF_ORDER}" = true ]; then
+ ((CLONE1_ORDER = OBJECT_ORDER - 1))
+ ((CLONE2_ORDER = OBJECT_ORDER - 2))
+ clone_order_msg="half of"
+ else
+ CLONE1_ORDER="${OBJECT_ORDER}"
+ CLONE2_ORDER="${OBJECT_ORDER}"
+ clone_order_msg="the same as"
+ fi
+ fi
+
+ [ "${TEST_CLONES}" != true ] || FORMAT=2
+
+ OBJECT_SIZE=$(echo "2 ^ ${OBJECT_ORDER}" | bc)
+ OBJECT_PAGES=$(echo "${OBJECT_SIZE} / ${PAGE_SIZE}" | bc)
+ IMAGE_SIZE=$((2 * 16 * OBJECT_SIZE / (1024 * 1024)))
+ [ "${IMAGE_SIZE}" -lt 1 ] && IMAGE_SIZE=1
+ IMAGE_OBJECTS=$((IMAGE_SIZE * (1024 * 1024) / OBJECT_SIZE))
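+ # e.g. the defaults (order 22, 4096-byte pages) yield 4 MiB objects,
+ # 1024 pages per object, a 128 MB image and 32 objects per image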
+
+ [ "${OBJECT_PAGES}" -lt 4 ] &&
+ usage "object size (${OBJECT_SIZE}) must be" \
+ "at least 4 * page size (${PAGE_SIZE})"
+
+ echo "parameters for this run:"
+ echo " format ${FORMAT} images will be tested"
+ echo " object order is ${OBJECT_ORDER}, so" \
+ "objects are ${OBJECT_SIZE} bytes"
+ echo " page size is ${PAGE_SIZE} bytes, so" \
+ "there are are ${OBJECT_PAGES} pages in an object"
+ echo " derived image size is ${IMAGE_SIZE} MB, so" \
+ "there are ${IMAGE_OBJECTS} objects in an image"
+ if [ "${TEST_CLONES}" = true ]; then
+ echo " clone functionality will be tested"
+ echo " object size for a clone will be ${clone_order_msg}"
+ echo " the object size of its parent image"
+ fi
+
+ true # Don't let the clones test spoil our return value
+}
+
+function image_dev_path() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ echo "${TEMP}/${image_name}"
+ return
+ fi
+
+ echo "/dev/rbd/rbd/${image_name}"
+}
+
+function out_data_dir() {
+ [ $# -lt 2 ] || exit 99
+ local out_data="${TEMP}/data"
+ local image_name
+
+ if [ $# -eq 1 ]; then
+ image_name="$1"
+ echo "${out_data}/${image_name}"
+ else
+ echo "${out_data}"
+ fi
+}
+
+function setup() {
+ verbose "===== setting up ====="
+ TEMP=$(mktemp -d /tmp/rbd_image_read.XXXXX)
+ mkdir -p $(out_data_dir)
+
+ # create and fill the original image with some data
+ create_image "${ORIGINAL}"
+ map_image "${ORIGINAL}"
+ fill_original
+
+ # create a snapshot of the original
+ create_image_snap "${ORIGINAL}" "${SNAP1}"
+ map_image_snap "${ORIGINAL}" "${SNAP1}"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ # create a clone of the original snapshot
+ create_snap_clone "${ORIGINAL}" "${SNAP1}" \
+ "${CLONE1}" "${CLONE1_ORDER}"
+ map_image "${CLONE1}"
+
+ # create a snapshot of that clone
+ create_image_snap "${CLONE1}" "${SNAP2}"
+ map_image_snap "${CLONE1}" "${SNAP2}"
+
+ # create a clone of that clone's snapshot
+ create_snap_clone "${CLONE1}" "${SNAP2}" \
+ "${CLONE2}" "${CLONE2_ORDER}"
+ map_image "${CLONE2}"
+ fi
+}
+
+function teardown() {
+ verbose "===== cleaning up ====="
+ if [ "${TEST_CLONES}" = true ]; then
+ unmap_image "${CLONE2}" || true
+ destroy_snap_clone "${CLONE1}" "${SNAP2}" "${CLONE2}" || true
+
+ unmap_image_snap "${CLONE1}" "${SNAP2}" || true
+ destroy_image_snap "${CLONE1}" "${SNAP2}" || true
+
+ unmap_image "${CLONE1}" || true
+ destroy_snap_clone "${ORIGINAL}" "${SNAP1}" "${CLONE1}" || true
+ fi
+ unmap_image_snap "${ORIGINAL}" "${SNAP1}" || true
+ destroy_image_snap "${ORIGINAL}" "${SNAP1}" || true
+ unmap_image "${ORIGINAL}" || true
+ destroy_image "${ORIGINAL}" || true
+
+ rm -rf $(out_data_dir)
+ rmdir "${TEMP}"
+}
+
+function create_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local image_path
+ local bytes
+
+ verbose "creating image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ bytes=$(echo "${IMAGE_SIZE} * 1024 * 1024 - 1" | bc)
+ quiet dd if=/dev/zero bs=1 count=1 seek="${bytes}" \
+ of="${image_path}"
+ return
+ fi
+
+ rbd create "${image_name}" --image-format "${FORMAT}" \
+ --size "${IMAGE_SIZE}" --order "${OBJECT_ORDER}" \
+ --image-shared
+}
+
+function destroy_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local image_path
+
+ verbose "destroying image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ rm -f "${image_path}"
+ return
+ fi
+
+ rbd rm "${image_name}"
+}
+
+function map_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1" # can be image@snap too
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ sudo rbd map "${image_name}"
+}
+
+function unmap_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1" # can be image@snap too
+ local image_path
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+ image_path=$(image_dev_path "${image_name}")
+
+ if [ -e "${image_path}" ]; then
+ sudo rbd unmap "${image_path}"
+ fi
+}
+
+function map_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ image_snap="${image_name}@${snap_name}"
+ map_image "${image_snap}"
+}
+
+function unmap_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ image_snap="${image_name}@${snap_name}"
+ unmap_image "${image_snap}"
+}
+
+function create_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap="${image_name}@${snap_name}"
+ local image_path
+ local snap_path
+
+ verbose "creating snapshot \"${snap_name}\"" \
+ "of image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ snap_path=$(image_dev_path "${image_snap}")
+
+ cp "${image_path}" "${snap_path}"
+ return
+ fi
+
+ rbd snap create "${image_snap}"
+}
+
+function destroy_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap="${image_name}@${snap_name}"
+ local snap_path
+
+ verbose "destroying snapshot \"${snap_name}\"" \
+ "of image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ snap_path=$(image_dev_path "${image_snap}")
+ rm -rf "${snap_path}"
+ return
+ fi
+
+ rbd snap rm "${image_snap}"
+}
+
+function create_snap_clone() {
+ [ $# -eq 4 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local clone_name="$3"
+ local clone_order="$4"
+ local image_snap="${image_name}@${snap_name}"
+ local snap_path
+ local clone_path
+
+ verbose "creating clone image \"${clone_name}\"" \
+ "of image snapshot \"${image_name}@${snap_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ snap_path=$(image_dev_path "${image_name}@${snap_name}")
+ clone_path=$(image_dev_path "${clone_name}")
+
+ cp "${snap_path}" "${clone_path}"
+ return
+ fi
+
+ rbd snap protect "${image_snap}"
+ rbd clone --order "${clone_order}" --image-shared \
+ "${image_snap}" "${clone_name}"
+}
+
+function destroy_snap_clone() {
+ [ $# -eq 3 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local clone_name="$3"
+ local image_snap="${image_name}@${snap_name}"
+ local clone_path
+
+ verbose "destroying clone image \"${clone_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ clone_path=$(image_dev_path "${clone_name}")
+
+ rm -rf "${clone_path}"
+ return
+ fi
+
+ rbd rm "${clone_name}"
+ rbd snap unprotect "${image_snap}"
+}
+
+# function that produces "random" data with which to fill the image
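+# (it repeatedly streams pages from /bin/bash starting at a pid-derived
+# offset; the consumer's dd count is what bounds the amount produced)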
+function source_data() {
+ while quiet dd if=/bin/bash skip=$(($$ % 199)) bs="${PAGE_SIZE}"; do
+ : # Just do the dd
+ done
+}
+
+function fill_original() {
+ local image_path=$(image_dev_path "${ORIGINAL}")
+
+ verbose "filling original image"
+ # Fill 16 objects worth of "random" data
+ source_data |
+ quiet dd bs="${PAGE_SIZE}" count=$((16 * OBJECT_PAGES)) \
+ of="${image_path}"
+}
+
+function do_read() {
+ [ $# -eq 3 -o $# -eq 4 ] || exit 99
+ local image_name="$1"
+ local offset="$2"
+ local length="$3"
+ [ "${length}" -gt 0 ] || err "do_read: length must be non-zero"
+ local image_path=$(image_dev_path "${image_name}")
+ local out_data=$(out_data_dir "${image_name}")
+ local range=$(printf "%06u~%04u" "${offset}" "${length}")
+ local out_file
+
+ [ $# -eq 4 ] && offset=$((offset + 16 * OBJECT_PAGES))
+
+ verbose "reading \"${image_name}\" pages ${range}"
+
+ out_file="${out_data}/pages_${range}"
+
+ quiet dd bs="${PAGE_SIZE}" skip="${offset}" count="${length}" \
+ if="${image_path}" of="${out_file}"
+}
+
+function one_pass() {
+ [ $# -eq 1 -o $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local extended
+ [ $# -eq 2 ] && extended="true"
+ local offset
+ local length
+
+ offset=0
+
+ # +-----------+-----------+---
+ # |X:X:X...X:X| : : ... : | :
+ # +-----------+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : |X: : ... : | :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | :X: ... : | :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | : :X...X: | :
+ # ---+-----------+---
+ length=$((OBJECT_PAGES - 3))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | : : ... :X| :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : |X:X:X...X:X| :
+ # ---+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ offset=$((offset + 1)) # skip 1
+
+ # ---+-----------+---
+ # : | :X:X...X:X| :
+ # ---+-----------+---
+ length=$((OBJECT_PAGES - 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : |X:X:X...X:X|X: : ... : | :
+ # ---+-----------+-----------+---
+ length=$((OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | :X:X...X:X|X: : ... : | :
+ # ---+-----------+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | :X:X...X:X|X:X: ... : | :
+ # ---+-----------+-----------+---
+ length=$((OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | : :X...X:X|X:X:X...X:X| :
+ # ---+-----------+-----------+---
+ length=$((2 * OBJECT_PAGES + 2))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ offset=$((offset + 1)) # skip 1
+
+ # ---+-----------+-----------+-----
+ # : | :X:X...X:X|X:X:X...X:X|X: :
+ # ---+-----------+-----------+-----
+ length=$((2 * OBJECT_PAGES))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # --+-----------+-----------+--------
+ # : | :X:X...X:X|X:X:X...X:X|X:X: :
+ # --+-----------+-----------+--------
+ length=$((2 * OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ # offset=$((offset + length))
+}
+
+function run_using() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local out_data=$(out_data_dir "${image_name}")
+
+ verbose "===== running using \"${image_name}\" ====="
+ mkdir -p "${out_data}"
+ one_pass "${image_name}"
+ one_pass "${image_name}" extended
+}
+
+function compare() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local out_data=$(out_data_dir "${image_name}")
+ local original=$(out_data_dir "${ORIGINAL}")
+
+ verbose "===== comparing \"${image_name}\" ====="
+ for i in $(ls "${original}"); do
+ verbose compare "\"${image_name}\" \"${i}\""
+ cmp "${original}/${i}" "${out_data}/${i}"
+ done
+ [ "${image_name}" = "${ORIGINAL}" ] || rm -rf "${out_data}"
+}
+
+function doit() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ run_using "${image_name}"
+ compare "${image_name}"
+}
+
+########## Start
+
+parseargs "$@"
+
+trap teardown EXIT HUP INT
+setup
+
+run_using "${ORIGINAL}"
+doit "${ORIGINAL}@${SNAP1}"
+if [ "${TEST_CLONES}" = true ]; then
+ doit "${CLONE1}"
+ doit "${CLONE1}@${SNAP2}"
+ doit "${CLONE2}"
+fi
+rm -rf $(out_data_dir "${ORIGINAL}")
+
+echo "Success!"
+
+exit 0
diff --git a/qa/workunits/rbd/import_export.sh b/qa/workunits/rbd/import_export.sh
new file mode 100755
index 000000000..89e8d35cf
--- /dev/null
+++ b/qa/workunits/rbd/import_export.sh
@@ -0,0 +1,259 @@
+#!/bin/sh -ex
+
+# V1 images are unsupported but required for testing purposes
+export RBD_FORCE_ALLOW_V1=1
+
+# returns data pool for a given image
+get_image_data_pool () {
+ image=$1
+ data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }')
+ if [ -z $data_pool ]; then
+ data_pool='rbd'
+ fi
+
+ echo $data_pool
+}
+
+# return list of object numbers populated in image
+objects () {
+ image=$1
+ prefix=$(rbd info $image | grep block_name_prefix | awk '{print $NF;}')
+
+ # strip off the prefix and leading zeros from the object names; sorting
+ # hex values numerically isn't strictly meaningful, but it at least makes
+ # the list repeatable and comparable
+ objects=$(rados ls -p $(get_image_data_pool $image) | grep $prefix | \
+ sed -e 's/'$prefix'\.//' -e 's/^0*\([0-9a-f]\)/\1/' | sort -u)
+ echo $objects
+}
+
+# return false if the files' contents differ or their on-disk
+# sizes differ by too much
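+# (stat's %b counts blocks of %B bytes, usually 512, so the tolerance
+# below allows roughly 2 MiB of on-disk difference)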
+
+compare_files_and_ondisk_sizes () {
+ cmp -l $1 $2 || return 1
+ origsize=$(stat $1 --format %b)
+ exportsize=$(stat $2 --format %b)
+ difference=$(($exportsize - $origsize))
+ difference=${difference#-} # absolute value
+ test $difference -ge 0 -a $difference -lt 4096
+}
+
+TMPDIR=/tmp/rbd_import_export_$$
+rm -rf $TMPDIR
+mkdir $TMPDIR
+trap "rm -rf $TMPDIR" INT TERM EXIT
+
+# cannot import a dir
+mkdir foo.$$
+rbd import foo.$$ foo.dir && exit 1 || true # should fail
+rmdir foo.$$
+
+# create a sparse file
+dd if=/bin/sh of=${TMPDIR}/img bs=1k count=1 seek=10
+dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
+dd if=/bin/rm of=${TMPDIR}/img bs=1k count=100 seek=1000
+dd if=/bin/ls of=${TMPDIR}/img bs=1k seek=10000
+dd if=/bin/ln of=${TMPDIR}/img bs=1k seek=100000
+dd if=/bin/grep of=${TMPDIR}/img bs=1k seek=1000000
+
+rbd rm testimg || true
+
+rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
+rbd export testimg ${TMPDIR}/img2
+rbd export testimg - > ${TMPDIR}/img3
+rbd rm testimg
+cmp ${TMPDIR}/img ${TMPDIR}/img2
+cmp ${TMPDIR}/img ${TMPDIR}/img3
+rm ${TMPDIR}/img2 ${TMPDIR}/img3
+
+# try again, importing from stdin
+rbd import $RBD_CREATE_ARGS - testimg < ${TMPDIR}/img
+rbd export testimg ${TMPDIR}/img2
+rbd export testimg - > ${TMPDIR}/img3
+rbd rm testimg
+cmp ${TMPDIR}/img ${TMPDIR}/img2
+cmp ${TMPDIR}/img ${TMPDIR}/img3
+
+rm ${TMPDIR}/img ${TMPDIR}/img2 ${TMPDIR}/img3
+
+if rbd help export | grep -q export-format; then
+ # try with --export-format for snapshots
+ dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
+ rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
+ rbd snap create testimg@snap
+ rbd image-meta set testimg key1 value1
+ IMAGEMETA_BEFORE=`rbd image-meta list testimg`
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import
+ rbd info testimg_import@snap
+ IMAGEMETA_AFTER=`rbd image-meta list testimg_import`
+ [ "$IMAGEMETA_BEFORE" = "$IMAGEMETA_AFTER" ]
+
+ # compare the contents between testimg and testimg_import
+ rbd export testimg_import ${TMPDIR}/img_import
+ compare_files_and_ondisk_sizes ${TMPDIR}/img ${TMPDIR}/img_import
+
+ rbd export testimg@snap ${TMPDIR}/img_snap
+ rbd export testimg_import@snap ${TMPDIR}/img_snap_import
+ compare_files_and_ondisk_sizes ${TMPDIR}/img_snap ${TMPDIR}/img_snap_import
+
+ rm ${TMPDIR}/img_v2
+ rm ${TMPDIR}/img_import
+ rm ${TMPDIR}/img_snap
+ rm ${TMPDIR}/img_snap_import
+
+ rbd snap rm testimg_import@snap
+ rbd remove testimg_import
+ rbd snap rm testimg@snap
+ rbd rm testimg
+
+ # order
+ rbd import --order 20 ${TMPDIR}/img testimg
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import|grep order|awk '{print $2}'|grep 20
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # features
+ rbd import --image-feature layering ${TMPDIR}/img testimg
+ FEATURES_BEFORE=`rbd info testimg|grep features`
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ FEATURES_AFTER=`rbd info testimg_import|grep features`
+ if [ "$FEATURES_BEFORE" != "$FEATURES_AFTER" ]; then
+ false
+ fi
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # stripe
+ rbd import --stripe-count 1000 --stripe-unit 4096 ${TMPDIR}/img testimg
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import|grep "stripe unit"|grep -Ei '(4 KiB|4096)'
+ rbd info testimg_import|grep "stripe count"|awk '{print $3}'|grep 1000
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # snap protect
+ rbd import --image-format=2 ${TMPDIR}/img testimg
+ rbd snap create testimg@snap1
+ rbd snap create testimg@snap2
+ rbd snap protect testimg@snap2
+ rbd export --export-format 2 testimg ${TMPDIR}/snap_protect
+ rbd import --export-format 2 ${TMPDIR}/snap_protect testimg_import
+ rbd info testimg_import@snap1 | grep 'protected: False'
+ rbd info testimg_import@snap2 | grep 'protected: True'
+
+ rm ${TMPDIR}/snap_protect
+
+ rbd snap unprotect testimg@snap2
+ rbd snap unprotect testimg_import@snap2
+ rbd snap purge testimg
+ rbd snap purge testimg_import
+ rbd remove testimg
+ rbd remove testimg_import
+fi
+
+tiered=0
+if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
+ tiered=1
+fi
+
+# create specifically sparse files
+# 1 1M block of sparse, 1 1M block of random
+dd if=/dev/urandom bs=1M seek=1 count=1 of=${TMPDIR}/sparse1
+
+# 1 1M block of random, 1 1M block of sparse
+dd if=/dev/urandom bs=1M count=1 of=${TMPDIR}/sparse2; truncate ${TMPDIR}/sparse2 -s 2M
+
+# 1M-block images; validate resulting blocks
+
+# 1M sparse, 1M data
+rbd rm sparse1 || true
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1
+rbd ls -l | grep sparse1 | grep -Ei '(2 MiB|2048k)'
+[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
+
+# export, compare contents and on-disk size
+rbd export sparse1 ${TMPDIR}/sparse1.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
+rm ${TMPDIR}/sparse1.out
+rbd rm sparse1
+
+# 1M data, 1M sparse
+rbd rm sparse2 || true
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse2
+rbd ls -l | grep sparse2 | grep -Ei '(2 MiB|2048k)'
+[ $tiered -eq 1 -o "$(objects sparse2)" = '0' ]
+rbd export sparse2 ${TMPDIR}/sparse2.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
+rm ${TMPDIR}/sparse2.out
+rbd rm sparse2
+
+# extend sparse1 to 10 1M blocks, sparse at the end
+truncate ${TMPDIR}/sparse1 -s 10M
+# import from stdin just for fun, verify still sparse
+rbd import $RBD_CREATE_ARGS --order 20 - sparse1 < ${TMPDIR}/sparse1
+rbd ls -l | grep sparse1 | grep -Ei '(10 MiB|10240k)'
+[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
+rbd export sparse1 ${TMPDIR}/sparse1.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
+rm ${TMPDIR}/sparse1.out
+rbd rm sparse1
+
+# extend sparse2 to 4M total with two more nonsparse megs
+dd if=/dev/urandom bs=2M count=1 of=${TMPDIR}/sparse2 oflag=append conv=notrunc
+# again from stdin
+rbd import $RBD_CREATE_ARGS --order 20 - sparse2 < ${TMPDIR}/sparse2
+rbd ls -l | grep sparse2 | grep -Ei '(4 MiB|4096k)'
+[ $tiered -eq 1 -o "$(objects sparse2)" = '0 2 3' ]
+rbd export sparse2 ${TMPDIR}/sparse2.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
+rm ${TMPDIR}/sparse2.out
+rbd rm sparse2
+
+# zeros import to a sparse image.  Note: an all-zero file currently
+# doesn't import as sparse due to the way we handle 'empty' fiemaps;
+# the image ends up zero-filled.
+
+echo "partially-sparse file imports to partially-sparse image"
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '1' ]
+rbd rm sparse
+
+echo "zeros import through stdin to sparse image"
+# stdin
+dd if=/dev/zero bs=1M count=4 | rbd import $RBD_CREATE_ARGS - sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '' ]
+rbd rm sparse
+
+echo "zeros export to sparse file"
+# Must create the image "by hand"; import won't create an all-zero image
+rbd create $RBD_CREATE_ARGS sparse --size 4
+prefix=$(rbd info sparse | grep block_name_prefix | awk '{print $NF;}')
+# drop in 0 object directly
+dd if=/dev/zero bs=4M count=1 | rados -p $(get_image_data_pool sparse) \
+ put ${prefix}.000000000000 -
+[ $tiered -eq 1 -o "$(objects sparse)" = '0' ]
+# 1 object full of zeros; export should still create 0-disk-usage file
+rm ${TMPDIR}/sparse || true
+rbd export sparse ${TMPDIR}/sparse
+[ $(stat ${TMPDIR}/sparse --format=%b) = '0' ]
+rbd rm sparse
+
+rm ${TMPDIR}/sparse ${TMPDIR}/sparse1 ${TMPDIR}/sparse2 ${TMPDIR}/sparse3 || true
+
+echo OK
diff --git a/qa/workunits/rbd/issue-20295.sh b/qa/workunits/rbd/issue-20295.sh
new file mode 100755
index 000000000..3d617a066
--- /dev/null
+++ b/qa/workunits/rbd/issue-20295.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -ex
+
+TEST_POOL=ecpool
+TEST_IMAGE=test1
+PGS=12
+
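+# an erasure-coded pool can hold RBD data only after overwrites are
+# explicitly enabled on it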
+ceph osd pool create $TEST_POOL $PGS $PGS erasure
+ceph osd pool application enable $TEST_POOL rbd
+ceph osd pool set $TEST_POOL allow_ec_overwrites true
+rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
+rbd bench \
+ --io-type write \
+ --io-size 4096 \
+ --io-pattern=rand \
+ --io-total 100M \
+ $TEST_IMAGE
+
+echo "OK"
diff --git a/qa/workunits/rbd/journal.sh b/qa/workunits/rbd/journal.sh
new file mode 100755
index 000000000..ba89e75c9
--- /dev/null
+++ b/qa/workunits/rbd/journal.sh
@@ -0,0 +1,326 @@
+#!/usr/bin/env bash
+set -e
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+function list_tests()
+{
+ echo "AVAILABLE TESTS"
+ for i in $TESTS; do
+ echo " $i"
+ done
+}
+
+function usage()
+{
+ echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...] [--no-cleanup]]"
+}
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
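+# The commit position is stored in the 'client_' omap key of the journal
+# header object; saving and restoring it lets a test rewind the journal
+# to an apparently uncommitted state.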
+function save_commit_position()
+{
+ local journal=$1
+
+ rados -p rbd getomapval journal.${journal} client_ \
+ $TMPDIR/${journal}.client_.omap
+}
+
+function restore_commit_position()
+{
+ local journal=$1
+
+ rados -p rbd setomapval journal.${journal} client_ \
+ < $TMPDIR/${journal}.client_.omap
+}
+
+test_rbd_journal()
+{
+ local image=testrbdjournal$$
+
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --size 128 ${image}
+ local journal=$(rbd info ${image} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+ test -n "${journal}"
+ rbd journal info ${journal}
+ rbd journal info --journal ${journal}
+ rbd journal info --image ${image}
+
+ rbd feature disable ${image} journaling
+
+ rbd info ${image} --format=xml 2>/dev/null |
+ expect_false $XMLSTARLET sel -t -v "//image/journal"
+ expect_false rbd journal info ${journal}
+ expect_false rbd journal info --image ${image}
+
+ rbd feature enable ${image} journaling
+
+ local journal1=$(rbd info ${image} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+ test "${journal}" = "${journal1}"
+
+ rbd journal info ${journal}
+
+ rbd journal status ${journal}
+
+ local count=10
+ save_commit_position ${journal}
+ rbd bench --io-type write ${image} --io-size 4096 --io-threads 1 \
+ --io-total $((4096 * count)) --io-pattern seq
+ rbd journal status --image ${image} | fgrep "tid=$((count - 1))"
+ restore_commit_position ${journal}
+ rbd journal status --image ${image} | fgrep "positions=[]"
+ local count1=$(rbd journal inspect --verbose ${journal} |
+ grep -c 'event_type.*AioWrite')
+ test "${count}" -eq "${count1}"
+
+ rbd journal export ${journal} $TMPDIR/journal.export
+ local size=$(stat -c "%s" $TMPDIR/journal.export)
+ test "${size}" -gt 0
+
+ rbd export ${image} $TMPDIR/${image}.export
+
+ local image1=${image}1
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --size 128 ${image1}
+ journal1=$(rbd info ${image1} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+
+ save_commit_position ${journal1}
+ rbd journal import --dest ${image1} $TMPDIR/journal.export
+ rbd snap create ${image1}@test
+ restore_commit_position ${journal1}
+ # check that commit position is properly updated: the journal should contain
+ # 14 entries (2 AioFlush + 10 AioWrite + 1 SnapCreate + 1 OpFinish) and
+ # commit position set to tid=14
+ rbd journal inspect --image ${image1} --verbose | awk '
+ /AioFlush/ {a++} # match: "event_type": "AioFlush",
+ /AioWrite/ {w++} # match: "event_type": "AioWrite",
+ /SnapCreate/ {s++} # match: "event_type": "SnapCreate",
+ /OpFinish/ {f++} # match: "event_type": "OpFinish",
+ /entries inspected/ {t=$1; e=$4} # match: 14 entries inspected, 0 errors
+ {print} # for diagnostic
+ END {
+ if (a != 2 || w != 10 || s != 1 || f != 1 || t != 14 || e != 0) exit(1)
+ }
+ '
+
+ rbd export ${image1}@test $TMPDIR/${image1}.export
+ cmp $TMPDIR/${image}.export $TMPDIR/${image1}.export
+
+ rbd journal reset ${journal}
+
+ rbd journal inspect --verbose ${journal} | expect_false grep 'event_type'
+
+ rbd snap purge ${image1}
+ rbd remove ${image1}
+ rbd remove ${image}
+}
+
+
+rbd_assert_eq() {
+ local image=$1
+ local cmd=$2
+ local param=$3
+ local expected_val=$4
+
+ local val=$(rbd --format xml ${cmd} --image ${image} |
+ $XMLSTARLET sel -t -v "${param}")
+ test "${val}" = "${expected_val}"
+}
+
+test_rbd_create()
+{
+ local image=testrbdcreate$$
+
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ --size 256 ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_copy()
+{
+ local src=testrbdcopys$$
+ rbd create --size 256 ${src}
+
+ local image=testrbdcopy$$
+ rbd copy --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${src} ${image}
+
+ rbd remove ${src}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_deep_copy()
+{
+ local src=testrbdcopys$$
+ rbd create --size 256 ${src}
+ rbd snap create ${src}@snap1
+
+ local dest=testrbdcopy$$
+ rbd deep copy --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${src} ${dest}
+
+ rbd snap purge ${src}
+ rbd remove ${src}
+
+ rbd_assert_eq ${dest} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${dest} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${dest} 'journal info' '//journal/object_pool' rbd
+
+ rbd snap purge ${dest}
+ rbd remove ${dest}
+}
+
+test_rbd_clone()
+{
+ local parent=testrbdclonep$$
+ rbd create --image-feature layering --size 256 ${parent}
+ rbd snap create ${parent}@snap
+ rbd snap protect ${parent}@snap
+
+ local image=testrbdclone$$
+ rbd clone --image-feature layering --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${parent}@snap ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+ rbd snap unprotect ${parent}@snap
+ rbd snap purge ${parent}
+ rbd remove ${parent}
+}
+
+test_rbd_import()
+{
+ local src=testrbdimports$$
+ rbd create --size 256 ${src}
+
+ rbd export ${src} $TMPDIR/${src}.export
+ rbd remove ${src}
+
+ local image=testrbdimport$$
+ rbd import --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ $TMPDIR/${src}.export ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_feature()
+{
+ local image=testrbdfeature$$
+
+ rbd create --image-feature exclusive-lock --size 256 ${image}
+
+ rbd feature enable ${image} journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+TESTS+=" rbd_journal"
+TESTS+=" rbd_create"
+TESTS+=" rbd_copy"
+TESTS+=" rbd_clone"
+TESTS+=" rbd_import"
+TESTS+=" rbd_feature"
+
+#
+# "main" follows
+#
+
+tests_to_run=()
+
+cleanup=true
+
+while [[ $# -gt 0 ]]; do
+ opt=$1
+
+ case "$opt" in
+ "-l" )
+ do_list=1
+ ;;
+ "--no-cleanup" )
+ cleanup=false
+ ;;
+ "-t" )
+ shift
+ if [[ -z "$1" ]]; then
+ echo "missing argument to '-t'"
+ usage ;
+ exit 1
+ fi
+ tests_to_run+=" $1"
+ ;;
+ "-h" )
+ usage ;
+ exit 0
+ ;;
+ esac
+ shift
+done
+
+if [[ $do_list -eq 1 ]]; then
+ list_tests ;
+ exit 0
+fi
+
+TMPDIR=/tmp/rbd_journal$$
+mkdir $TMPDIR
+if $cleanup; then
+ trap "rm -fr $TMPDIR" 0
+fi
+
+if test -z "$tests_to_run" ; then
+ tests_to_run="$TESTS"
+fi
+
+for i in $tests_to_run; do
+ set -x
+ test_${i}
+ set +x
+done
+
+echo OK
diff --git a/qa/workunits/rbd/kernel.sh b/qa/workunits/rbd/kernel.sh
new file mode 100755
index 000000000..faa5760ee
--- /dev/null
+++ b/qa/workunits/rbd/kernel.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+set -ex
+
+CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
+CEPH_ID=${CEPH_ID:-admin}
+SECRET_ARGS=''
+if [ -n "$CEPH_SECRET_FILE" ]; then
+ SECRET_ARGS="--secret $CEPH_SECRET_FILE"
+fi
+
+TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc"
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function get_device_dir {
+ local POOL=$1
+ local IMAGE=$2
+ local SNAP=$3
+ rbd device list | tail -n +2 | egrep "\s+$POOL\s+$IMAGE\s+$SNAP\s+" |
+ awk '{print $1;}'
+}
+
+function clean_up {
+ [ -e /dev/rbd/rbd/testimg1@snap1 ] &&
+ sudo rbd device unmap /dev/rbd/rbd/testimg1@snap1
+ if [ -e /dev/rbd/rbd/testimg1 ]; then
+ sudo rbd device unmap /dev/rbd/rbd/testimg1
+ rbd snap purge testimg1 || true
+ fi
+ rbd ls | grep testimg1 > /dev/null && rbd rm testimg1 || true
+ sudo rm -f $TMP_FILES
+}
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+# create an image
+dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
+dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
+dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
+dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
+dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
+dd if=/dev/zero of=/tmp/img1 count=0 seek=150000
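+# (150000 512-byte sectors = 76800000 bytes, the size checked below)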
+
+# import
+rbd import /tmp/img1 testimg1
+sudo rbd device map testimg1 --user $CEPH_ID $SECRET_ARGS
+
+DEV_ID1=$(get_device_dir rbd testimg1 -)
+echo "dev_id1 = $DEV_ID1"
+cat /sys/bus/rbd/devices/$DEV_ID1/size
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
+cmp /tmp/img1 /tmp/img1.export
+
+# snapshot
+rbd snap create testimg1 --snap=snap1
+sudo rbd device map --snap=snap1 testimg1 --user $CEPH_ID $SECRET_ARGS
+
+DEV_ID2=$(get_device_dir rbd testimg1 snap1)
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
+cmp /tmp/img1 /tmp/img1.snap1
+
+# resize
+rbd resize testimg1 --size=40 --allow-shrink
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 41943040
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.small
+cp /tmp/img1 /tmp/img1.trunc
+truncate -s 41943040 /tmp/img1.trunc
+cmp /tmp/img1.trunc /tmp/img1.small
+
+# rollback expects an unlocked image
+# (acquire and) release the lock as a side effect
+rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg1
+
+# rollback and check data again
+rbd snap rollback --snap=snap1 testimg1
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+sudo rm -f /tmp/img1.snap1 /tmp/img1.export
+
+sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
+cmp /tmp/img1 /tmp/img1.snap1
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
+cmp /tmp/img1 /tmp/img1.export
+
+# zeros are returned if an image or a snapshot is removed
+expect_false cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
+rbd snap rm --snap=snap1 testimg1
+cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_data_pool.sh b/qa/workunits/rbd/krbd_data_pool.sh
new file mode 100755
index 000000000..8eada88bb
--- /dev/null
+++ b/qa/workunits/rbd/krbd_data_pool.sh
@@ -0,0 +1,206 @@
+#!/usr/bin/env bash
+
+set -ex
+
+export RBD_FORCE_ALLOW_V1=1
+
+function fill_image() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev
+ sudo rbd unmap $dev
+}
+
+function create_clones() {
+ local spec=$1
+
+ rbd snap create $spec@snap
+ rbd snap protect $spec@snap
+
+ local pool=${spec%/*} # pool/image is assumed
+ local image=${spec#*/}
+ local child_pool
+ for child_pool in $pool clonesonly; do
+ rbd clone $spec@snap $child_pool/$pool-$image-clone1
+ rbd clone $spec@snap --data-pool repdata $child_pool/$pool-$image-clone2
+ rbd clone $spec@snap --data-pool ecdata $child_pool/$pool-$image-clone3
+ done
+}
+
+function trigger_copyup() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ local i
+ {
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ echo pwrite -b $OBJECT_SIZE -S 0x59 $((i * OBJECT_SIZE + OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))
+ done
+ echo fsync
+ echo quit
+ } | xfs_io $dev
+ sudo rbd unmap $dev
+}
+
+function compare() {
+ local spec=$1
+ local object=$2
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ local i
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ dd if=$dev bs=$OBJECT_SIZE count=1 skip=$i | cmp $object -
+ done
+ sudo rbd unmap $dev
+}
+
+function mkfs_and_mount() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ blkdiscard $dev
+ mkfs.ext4 -q -E nodiscard $dev
+ sudo mount $dev /mnt
+ sudo umount /mnt
+ sudo rbd unmap $dev
+}
+
+function list_HEADs() {
+ local pool=$1
+
+ rados -p $pool ls | while read obj; do
+ if rados -p $pool stat $obj >/dev/null 2>&1; then
+ echo $obj
+ fi
+ done
+}
+
+function count_data_objects() {
+ local spec=$1
+
+ local pool
+ pool=$(rbd info $spec | grep 'data_pool: ' | awk '{ print $NF }')
+ if [[ -z $pool ]]; then
+ pool=${spec%/*} # pool/image is assumed
+ fi
+
+ local prefix
+ prefix=$(rbd info $spec | grep 'block_name_prefix: ' | awk '{ print $NF }')
+ rados -p $pool ls | grep -c $prefix
+}
+
+function get_num_clones() {
+ local pool=$1
+
+ rados -p $pool --format=json df |
+ python3 -c 'import sys, json; print(json.load(sys.stdin)["pools"][0]["num_object_clones"])'
+}
+
+ceph osd pool create repdata 24 24
+rbd pool init repdata
+ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ceph osd pool create ecdata 24 24 erasure teuthologyprofile
+rbd pool init ecdata
+ceph osd pool set ecdata allow_ec_overwrites true
+ceph osd pool create rbdnonzero 24 24
+rbd pool init rbdnonzero
+ceph osd pool create clonesonly 24 24
+rbd pool init clonesonly
+
+for pool in rbd rbdnonzero; do
+ rbd create --size 200 --image-format 1 $pool/img0
+ rbd create --size 200 $pool/img1
+ rbd create --size 200 --data-pool repdata $pool/img2
+ rbd create --size 200 --data-pool ecdata $pool/img3
+done
+
+IMAGE_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+
+OBJECT_X=$(mktemp) # xxxx
+xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $OBJECT_SIZE" $OBJECT_X
+
+OBJECT_XY=$(mktemp) # xxYY
+xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $((OBJECT_SIZE / 2))" \
+ -c "pwrite -b $OBJECT_SIZE -S 0x59 $((OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))" \
+ $OBJECT_XY
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ fill_image $pool/img$i
+ if [[ $i -ne 0 ]]; then
+ create_clones $pool/img$i
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ trigger_copyup $child_pool/$pool-img$i-clone$j
+ done
+ done
+ fi
+ done
+done
+
+# rbd_directory, rbd_children, rbd_info + img0 header + an id and a header
+# object for each of img1-3 and for each of their 3 same-pool clones
+NUM_META_RBDS=$((3 + 1 + 3 * (1*2 + 3*2)))
+# rbd_directory, rbd_children, rbd_info + an id and a header object for
+# each of the 3 clones of img1-3 from both parent pools
+NUM_META_CLONESONLY=$((3 + 2 * 3 * (3*2)))
+
+[[ $(rados -p rbd ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(rados -p repdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(rados -p ecdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(rados -p rbdnonzero ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(rados -p clonesonly ls | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ [[ $(count_data_objects $pool/img$i) -eq $NUM_OBJECTS ]]
+ if [[ $i -ne 0 ]]; then
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ [[ $(count_data_objects $child_pool/$pool-img$i-clone$j) -eq $NUM_OBJECTS ]]
+ done
+ done
+ fi
+ done
+done
+
+[[ $(get_num_clones rbd) -eq 0 ]]
+[[ $(get_num_clones repdata) -eq 0 ]]
+[[ $(get_num_clones ecdata) -eq 0 ]]
+[[ $(get_num_clones rbdnonzero) -eq 0 ]]
+[[ $(get_num_clones clonesonly) -eq 0 ]]
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ compare $pool/img$i $OBJECT_X
+ mkfs_and_mount $pool/img$i
+ if [[ $i -ne 0 ]]; then
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ compare $child_pool/$pool-img$i-clone$j $OBJECT_XY
+ done
+ done
+ fi
+ done
+done
+
+# mkfs_and_mount should discard some objects everywhere but in clonesonly
+[[ $(list_HEADs rbd | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs repdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs ecdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs rbdnonzero | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs clonesonly | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
+
+[[ $(get_num_clones rbd) -eq $NUM_OBJECTS ]]
+[[ $(get_num_clones repdata) -eq $((2 * NUM_OBJECTS)) ]]
+[[ $(get_num_clones ecdata) -eq $((2 * NUM_OBJECTS)) ]]
+[[ $(get_num_clones rbdnonzero) -eq $NUM_OBJECTS ]]
+[[ $(get_num_clones clonesonly) -eq 0 ]]
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_exclusive_option.sh b/qa/workunits/rbd/krbd_exclusive_option.sh
new file mode 100755
index 000000000..f8493ce98
--- /dev/null
+++ b/qa/workunits/rbd/krbd_exclusive_option.sh
@@ -0,0 +1,233 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function assert_locked() {
+ local dev_id="${1#/dev/rbd}"
+
+ local client_addr
+ client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
+
+ local client_id
+ client_id="$(< $SYSFS_DIR/$dev_id/client_id)"
+ # client4324 -> client.4324
+ client_id="client.${client_id#client}"
+
+ local watch_cookie
+ watch_cookie="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID |
+ grep $client_id | cut -d ' ' -f 3 | cut -d '=' -f 2)"
+ [[ $(echo -n "$watch_cookie" | grep -c '^') -eq 1 ]]
+
+ local actual
+ actual="$(rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
+ python3 -m json.tool --sort-keys)"
+
+ local expected
+ expected="$(cat <<EOF | python3 -m json.tool --sort-keys
+{
+ "lockers": [
+ {
+ "addr": "$client_addr",
+ "cookie": "auto $watch_cookie",
+ "description": "",
+ "expiration": "0.000000",
+ "name": "$client_id"
+ }
+ ],
+ "name": "rbd_lock",
+ "tag": "internal",
+ "type": "exclusive"
+}
+EOF
+ )"
+
+ [ "$actual" = "$expected" ]
+}
+
+function assert_unlocked() {
+ rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
+ grep '"lockers":\[\]'
+}
+
+function blocklist_add() {
+ local dev_id="${1#/dev/rbd}"
+
+ local client_addr
+ client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
+
+ ceph osd blocklist add $client_addr
+}
+
+SYSFS_DIR="/sys/bus/rbd/devices"
+IMAGE_NAME="exclusive-option-test"
+
+rbd create --size 1 --image-feature '' $IMAGE_NAME
+
+IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
+ python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_unlocked
+sudo rbd unmap $DEV
+assert_unlocked
+
+expect_false sudo rbd map -o exclusive $IMAGE_NAME
+assert_unlocked
+
+expect_false sudo rbd map -o lock_on_read $IMAGE_NAME
+assert_unlocked
+
+rbd feature enable $IMAGE_NAME exclusive-lock
+rbd snap create $IMAGE_NAME@snap
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+[[ $(blockdev --getro $DEV) -eq 0 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME@snap)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o ro $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+[[ $(blockdev --getro $DEV) -eq 0 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME@snap)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive,ro $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+# alternate syntax
+DEV=$(sudo rbd map --exclusive --read-only $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
+assert_locked $OTHER_DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+dd if=/dev/urandom of=$OTHER_DEV bs=4k count=10 oflag=direct
+assert_locked $OTHER_DEV
+sudo rbd unmap $DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
+assert_locked $OTHER_DEV
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+assert_unlocked
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o lock_on_read $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
+assert_locked $OTHER_DEV
+expect_false dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+sudo udevadm settle
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+assert_locked $DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false sudo rbd map -o noshare $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false sudo rbd map -o noshare,exclusive $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+rbd resize --size 1G $IMAGE_NAME
+assert_unlocked
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false rbd resize --size 2G $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+{ sleep 10; blocklist_add $DEV; } &
+PID=$!
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=200000 oflag=direct
+wait $PID
+# break lock
+OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
+assert_locked $OTHER_DEV
+sudo rbd unmap $DEV
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+
+# induce a watch error after 30 seconds: osdkeepalive=60 exceeds the OSD's
+# default 30s watch timeout, so the watch times out and is reestablished
+DEV=$(sudo rbd map -o exclusive,osdkeepalive=60 $IMAGE_NAME)
+assert_locked $DEV
+OLD_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
+sleep 40
+assert_locked $DEV
+NEW_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
+# same client_id, old cookie < new cookie
+[ "$(echo "$OLD_WATCHER" | cut -d ' ' -f 2)" = \
+ "$(echo "$NEW_WATCHER" | cut -d ' ' -f 2)" ]
+[[ $(echo "$OLD_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) -lt \
+ $(echo "$NEW_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_fallocate.sh b/qa/workunits/rbd/krbd_fallocate.sh
new file mode 100755
index 000000000..79efa1a8b
--- /dev/null
+++ b/qa/workunits/rbd/krbd_fallocate.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# - fallocate -z deallocates because BLKDEV_ZERO_NOUNMAP hint is ignored by
+# krbd
+#
+# - big unaligned blkdiscard and fallocate -z/-p leave the objects in place
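+#
+# For reference only: with a recent util-linux and kernel, the py_*
+# helpers below roughly correspond to
+#   blkdiscard --offset <off> <dev>
+#   fallocate --zero-range --keep-size --offset <off> --length <len> <dev>
+#   fallocate --punch-hole --keep-size --offset <off> --length <len> <dev>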
+
+set -ex
+
+# no blkdiscard(8) in trusty
+function py_blkdiscard() {
+ local offset=$1
+
+ python3 <<EOF
+import fcntl, struct
+BLKDISCARD = 0x1277
+with open('$DEV', 'w') as dev:
+ fcntl.ioctl(dev, BLKDISCARD, struct.pack('QQ', $offset, $IMAGE_SIZE - $offset))
+EOF
+}
+
+# fallocate(1) in trusty doesn't support -z/-p
+function py_fallocate() {
+ local mode=$1
+ local offset=$2
+
+ python3 <<EOF
+import os, ctypes, ctypes.util
+FALLOC_FL_KEEP_SIZE = 0x01
+FALLOC_FL_PUNCH_HOLE = 0x02
+FALLOC_FL_ZERO_RANGE = 0x10
+libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+with open('$DEV', 'w') as dev:
+ if libc.fallocate(dev.fileno(), ctypes.c_int($mode), ctypes.c_long($offset), ctypes.c_long($IMAGE_SIZE - $offset)):
+ err = ctypes.get_errno()
+ raise OSError(err, os.strerror(err))
+EOF
+}
+
+function allocate() {
+ xfs_io -c "pwrite -b $OBJECT_SIZE -W 0 $IMAGE_SIZE" $DEV
+ assert_allocated
+}
+
+function assert_allocated() {
+ cmp <(od -xAx $DEV) - <<EOF
+000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $NUM_OBJECTS ]]
+}
+
+function assert_zeroes() {
+ local num_objects_expected=$1
+
+ cmp <(od -xAx $DEV) - <<EOF
+000000 0000 0000 0000 0000 0000 0000 0000 0000
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
+}
+
+function assert_zeroes_unaligned() {
+ local num_objects_expected=$1
+
+ cmp <(od -xAx $DEV) - <<EOF
+000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
+*
+$(printf %x $((OBJECT_SIZE / 2))) 0000 0000 0000 0000 0000 0000 0000 0000
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
+ for ((i = 0; i < $num_objects_expected; i++)); do
+ rados -p rbd stat rbd_data.$IMAGE_ID.$(printf %016x $i) | egrep "(size $((OBJECT_SIZE / 2)))|(size 0)"
+ done
+}
+
+IMAGE_NAME="fallocate-test"
+
+rbd create --size 200 $IMAGE_NAME
+
+IMAGE_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+
+IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
+ python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+
+# make sure -ENOENT from discarding nonexistent objects is hidden
+assert_zeroes 0
+py_blkdiscard 0
+assert_zeroes 0
+
+# blkdev_issue_discard
+allocate
+py_blkdiscard 0
+assert_zeroes 0
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes 0
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes 0
+
+# unaligned blkdev_issue_discard
+allocate
+py_blkdiscard $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o notrim $IMAGE_NAME)
+
+# blkdev_issue_discard
+allocate
+py_blkdiscard 0 |& grep 'Operation not supported'
+assert_allocated
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes $NUM_OBJECTS
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0 |& grep 'Operation not supported'
+assert_allocated
+
+sudo rbd unmap $DEV
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_huge_osdmap.sh b/qa/workunits/rbd/krbd_huge_osdmap.sh
new file mode 100755
index 000000000..0a550d674
--- /dev/null
+++ b/qa/workunits/rbd/krbd_huge_osdmap.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/40481.
+#
+# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
+# is ~13M.  However, the in-memory osdmap is larger than the remaining ~3M:
+# the in-memory osd_addr array alone for 60000 OSDs is ~8M because each
+# entry is a sockaddr_storage.
+#
+# Set mon_max_osd = 60000 in ceph.conf.
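+#
+# Back-of-the-envelope check (assuming a 128-byte sockaddr_storage, as on
+# Linux): 60000 * 128 = 7680000 bytes ~= 7.3M, which is where the ~8M
+# figure above comes from.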
+
+set -ex
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function run_test() {
+ local dev
+
+ # initially tiny, grow via incrementals
+ dev=$(sudo rbd map img)
+ for max in 8 60 600 6000 60000; do
+ ceph osd setmaxosd $max
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ done
+ ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ sudo rbd unmap $dev
+
+ # initially huge, shrink via incrementals
+ dev=$(sudo rbd map img)
+ for max in 60000 6000 600 60 8; do
+ ceph osd setmaxosd $max
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ done
+ ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ sudo rbd unmap $dev
+}
+
+rbd create --size 12M img
+run_test
+# repeat with primary affinity (adds an extra array)
+ceph osd primary-affinity osd.0 0.5
+run_test
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh b/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh
new file mode 100755
index 000000000..f70f38639
--- /dev/null
+++ b/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -ex
+
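+# Mapping an image in a freshly created pool is expected to work, which
+# requires the kernel client to fetch the latest osdmap at map time.
+# Exercise that both with no other mappings present and with a long-lived
+# mapping (potentially holding an older osdmap) in the background.
+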
+function run_test() {
+ ceph osd pool create foo 12
+ rbd pool init foo
+ rbd create --size 1 foo/img
+
+ local dev
+ dev=$(sudo rbd map foo/img)
+ sudo rbd unmap $dev
+
+ ceph osd pool delete foo foo --yes-i-really-really-mean-it
+}
+
+NUM_ITER=20
+
+for ((i = 0; i < $NUM_ITER; i++)); do
+ run_test
+done
+
+rbd create --size 1 img
+DEV=$(sudo rbd map img)
+for ((i = 0; i < $NUM_ITER; i++)); do
+ run_test
+done
+sudo rbd unmap $DEV
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_namespaces.sh b/qa/workunits/rbd/krbd_namespaces.sh
new file mode 100755
index 000000000..0273d8499
--- /dev/null
+++ b/qa/workunits/rbd/krbd_namespaces.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function get_block_name_prefix() {
+ rbd info --format=json $1 | python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'])"
+}
+
+function do_pwrite() {
+ local spec=$1
+ local old_byte=$2
+ local new_byte=$3
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$old_byte) $dev
+ xfs_io -c "pwrite -b 1M -S $new_byte 0 10M" $dev
+ sudo rbd unmap $dev
+}
+
+function do_cmp() {
+ local spec=$1
+ local byte=$2
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$byte) $dev
+ sudo rbd unmap $dev
+}
+
+function gen_child_specs() {
+ local i=$1
+
+ local child_specs="foo/img$i-clone1 foo/img$i-clone2 foo/ns1/img$i-clone1 foo/ns1/img$i-clone2"
+ if [[ $i -ge 3 ]]; then
+ child_specs="$child_specs foo/ns2/img$i-clone1 foo/ns2/img$i-clone2"
+ fi
+ echo $child_specs
+}
+
+ceph osd pool create foo 12
+rbd pool init foo
+ceph osd pool create bar 12
+rbd pool init bar
+
+ceph osd set-require-min-compat-client nautilus
+rbd namespace create foo/ns1
+rbd namespace create foo/ns2
+
+SPECS=(foo/img1 foo/img2 foo/ns1/img3 foo/ns1/img4)
+
+COUNT=1
+for spec in "${SPECS[@]}"; do
+ if [[ $spec =~ img1|img3 ]]; then
+ rbd create --size 10 $spec
+ else
+ rbd create --size 10 --data-pool bar $spec
+ fi
+ do_pwrite $spec 000 $(printf %03d $COUNT)
+ rbd snap create $spec@snap
+ COUNT=$((COUNT + 1))
+done
+for i in {1..4}; do
+ for child_spec in $(gen_child_specs $i); do
+ if [[ $child_spec =~ clone1 ]]; then
+ rbd clone ${SPECS[i - 1]}@snap $child_spec
+ else
+ rbd clone --data-pool bar ${SPECS[i - 1]}@snap $child_spec
+ fi
+ do_pwrite $child_spec $(printf %03d $i) $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+ done
+done
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img1-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img2-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img3-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img4-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone2)) -eq 3 ]]
+
+COUNT=1
+for spec in "${SPECS[@]}"; do
+ do_cmp $spec $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+done
+for i in {1..4}; do
+ for child_spec in $(gen_child_specs $i); do
+ do_cmp $child_spec $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+ done
+done
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_rxbounce.sh b/qa/workunits/rbd/krbd_rxbounce.sh
new file mode 100755
index 000000000..ad00e3f96
--- /dev/null
+++ b/qa/workunits/rbd/krbd_rxbounce.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -ex
+
+rbd create --size 256 img
+
+IMAGE_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+OP_SIZE=16384
+
+DEV=$(sudo rbd map img)
+{
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ echo pwrite -b $OP_SIZE -S $i $((i * OBJECT_SIZE)) $OP_SIZE
+ done
+ echo fsync
+ echo quit
+} | xfs_io $DEV
+sudo rbd unmap $DEV
+
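+# All reader threads share a single buffer, so the page contents change
+# while krbd is still receiving data into it.  Without the rxbounce map
+# option this is expected to show up as checksum/integrity errors on the
+# wire (see the dmesg greps below); rxbounce makes krbd receive into
+# bounce pages instead.
+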
+g++ -xc++ -o racereads - -lpthread <<EOF
+#include <assert.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <thread>
+#include <vector>
+
+const int object_size = $OBJECT_SIZE;
+const int num_objects = $NUM_OBJECTS;
+const int read_len = $OP_SIZE;
+const int num_reads = 1024;
+
+int main() {
+ int fd = open("$DEV", O_DIRECT | O_RDONLY);
+ assert(fd >= 0);
+
+ void *buf;
+ int r = posix_memalign(&buf, 512, read_len);
+ assert(r == 0);
+
+ std::vector<std::thread> threads;
+ for (int i = 0; i < num_objects; i++) {
+ threads.emplace_back(
+ [fd, buf, read_off = static_cast<off_t>(i) * object_size]() {
+ for (int i = 0; i < num_reads; i++) {
+ auto len = pread(fd, buf, read_len, read_off);
+ assert(len == read_len);
+ }
+ });
+ }
+
+ for (auto &t : threads) {
+ t.join();
+ }
+}
+EOF
+
+DEV=$(sudo rbd map -o ms_mode=legacy img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -gt 100 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=legacy,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -eq 0 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=crc img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -gt 100 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=crc,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+# rxbounce is a no-op for secure mode
+DEV=$(sudo rbd map -o ms_mode=secure img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=secure,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+rbd rm img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_stable_writes.sh b/qa/workunits/rbd/krbd_stable_writes.sh
new file mode 100755
index 000000000..d00e5fd04
--- /dev/null
+++ b/qa/workunits/rbd/krbd_stable_writes.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+set -ex
+
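+# krbd requires stable writes (page contents must not change while a write
+# is in flight, e.g. for checksumming), which the kernel exposes via
+# queue/stable_writes.  Check that device-mapper inherits the flag whenever
+# an rbd device is part of the table, including across table reloads, while
+# a plain loop device does not set it.
+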
+function assert_dm() {
+ local name=$1
+ local val=$2
+
+ local devno
+ devno=$(sudo dmsetup info -c --noheadings -o Major,Minor $name)
+ grep -q $val /sys/dev/block/$devno/queue/stable_writes
+}
+
+function dmsetup_reload() {
+ local name=$1
+
+ local table
+ table=$(</dev/stdin)
+
+ sudo dmsetup suspend $name
+ echo "$table" | sudo dmsetup reload $name
+ sudo dmsetup resume $name
+}
+
+IMAGE_NAME="stable-writes-test"
+
+rbd create --size 1 $IMAGE_NAME
+DEV=$(sudo rbd map $IMAGE_NAME)
+
+fallocate -l 1M loopfile
+LOOP_DEV=$(sudo losetup -f --show loopfile)
+
+[[ $(blockdev --getsize64 $DEV) -eq 1048576 ]]
+grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
+
+rbd resize --size 2 $IMAGE_NAME
+[[ $(blockdev --getsize64 $DEV) -eq 2097152 ]]
+grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 error
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+1024 2048 error
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 error
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+sudo losetup -d $LOOP_DEV
+rm loopfile
+
+sudo rbd unmap $DEV
+rbd rm $IMAGE_NAME
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_enumerate.sh b/qa/workunits/rbd/krbd_udev_enumerate.sh
new file mode 100755
index 000000000..494f958f8
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_enumerate.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/41036, but it also
+# triggers https://tracker.ceph.com/issues/41404 in some environments.
+
+set -ex
+
+function assert_exit_codes() {
+ declare -a pids=($@)
+
+ for pid in ${pids[@]}; do
+ wait $pid
+ done
+}
+
+function run_map() {
+ declare -a pids
+
+ for i in {1..300}; do
+ sudo rbd map img$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
+ [[ $(rbd showmapped | wc -l) -eq 301 ]]
+}
+
+function run_unmap_by_dev() {
+ declare -a pids
+
+ run_map
+ for i in {0..299}; do
+ sudo rbd unmap /dev/rbd$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
+ [[ $(rbd showmapped | wc -l) -eq 0 ]]
+}
+
+function run_unmap_by_spec() {
+ declare -a pids
+
+ run_map
+ for i in {1..300}; do
+ sudo rbd unmap img$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
+ [[ $(rbd showmapped | wc -l) -eq 0 ]]
+}
+
+# Can't test with exclusive-lock; don't bother enabling deep-flatten either.
+# See https://tracker.ceph.com/issues/42492.
+for i in {1..300}; do
+ rbd create --size 1 --image-feature '' img$i
+done
+
+for i in {1..30}; do
+ echo Iteration $i
+ run_unmap_by_dev
+ run_unmap_by_spec
+done
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh b/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh
new file mode 100755
index 000000000..7c9c53a2f
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/41404, verifying that udev
+# events are properly reaped while the image is being (un)mapped in the kernel.
+# UDEV_BUF_SIZE is 1M (giving us a 2M socket receive buffer), but modprobe +
+# modprobe -r generate ~28M worth of "block" events.
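+#
+# To watch the flood by hand (illustrative only, not part of the test):
+#   udevadm monitor --kernel --subsystem-match=block &
+#   sudo modprobe scsi_debug max_luns=16 add_host=16 num_parts=1 num_tgts=16
+#   sudo modprobe -r scsi_debug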
+
+set -ex
+
+rbd create --size 1 img
+
+ceph osd pause
+sudo rbd map img &
+PID=$!
+sudo modprobe scsi_debug max_luns=16 add_host=16 num_parts=1 num_tgts=16
+sudo udevadm settle
+sudo modprobe -r scsi_debug
+[[ $(rbd showmapped | wc -l) -eq 0 ]]
+ceph osd unpause
+wait $PID
+[[ $(rbd showmapped | wc -l) -eq 2 ]]
+sudo rbd unmap img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_netns.sh b/qa/workunits/rbd/krbd_udev_netns.sh
new file mode 100755
index 000000000..e746a682e
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_netns.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+set -ex
+
+sudo ip netns add ns1
+sudo ip link add veth1-ext type veth peer name veth1-int
+sudo ip link set veth1-int netns ns1
+
+sudo ip netns exec ns1 ip link set dev lo up
+sudo ip netns exec ns1 ip addr add 192.168.1.2/24 dev veth1-int
+sudo ip netns exec ns1 ip link set veth1-int up
+sudo ip netns exec ns1 ip route add default via 192.168.1.1
+
+sudo ip addr add 192.168.1.1/24 dev veth1-ext
+sudo ip link set veth1-ext up
+
+# Enable forwarding between the namespace and the default route
+# interface and set up NAT. In case of multiple default routes,
+# just pick the first one.
+if [[ $(sysctl -n net.ipv4.ip_forward) -eq 0 ]]; then
+ sudo iptables -P FORWARD DROP
+ sudo sysctl -w net.ipv4.ip_forward=1
+fi
+IFACE="$(ip route list 0.0.0.0/0 | head -n 1 | cut -d ' ' -f 5)"
+sudo iptables -A FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
+sudo iptables -A FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
+sudo iptables -t nat -A POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
+
+rbd create --size 300 img
+
+DEV="$(sudo rbd map img)"
+mkfs.ext4 "$DEV"
+sudo mount "$DEV" /mnt
+sudo umount /mnt
+sudo rbd unmap "$DEV"
+
+sudo ip netns exec ns1 bash <<'EOF'
+
+set -ex
+
+DEV="/dev/rbd/rbd/img"
+[[ ! -e "$DEV" ]]
+
+# In a network namespace, "rbd map" maps the device and hangs waiting
+# for udev add uevents. udev runs as usual (in particular creating the
+# symlink which is used here because the device node is never printed),
+# but the uevents it sends out never come because they don't cross
+# network namespace boundaries.
+set +e
+timeout 30s rbd map img
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+[[ -L "$DEV" ]]
+mkfs.ext4 -F "$DEV"
+mount "$DEV" /mnt
+umount /mnt
+
+# In a network namespace, "rbd unmap" unmaps the device and hangs
+# waiting for udev remove uevents. udev runs as usual (removing the
+# symlink), but the uevents it sends out never come because they don't
+# cross network namespace boundaries.
+set +e
+timeout 30s rbd unmap "$DEV"
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+[[ ! -e "$DEV" ]]
+
+# Skip waiting for udev uevents with "-o noudev".
+DEV="$(rbd map -o noudev img)"
+mkfs.ext4 -F "$DEV"
+mount "$DEV" /mnt
+umount /mnt
+rbd unmap -o noudev "$DEV"
+
+EOF
+
+rbd rm img
+
+sudo iptables -t nat -D POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
+sudo iptables -D FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
+sudo iptables -D FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
+sudo ip netns delete ns1
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_symlinks.sh b/qa/workunits/rbd/krbd_udev_symlinks.sh
new file mode 100755
index 000000000..271476527
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_symlinks.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+set -ex
+
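+# udev (via ceph-rbdnamer) is expected to create
+# /dev/rbd/<pool>/[<ns>/]<image>[@<snap>] symlinks for every mapping, plus
+# -part<N> links for partitions; the readlink checks below verify them
+# after mapping in random order.
+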
+SPECS=(
+rbd/img1
+rbd/img2
+rbd/img2@snap1
+rbd/img3
+rbd/img3@snap1
+rbd/img3@snap2
+rbd/ns1/img1
+rbd/ns1/img2
+rbd/ns1/img2@snap1
+rbd/ns1/img3
+rbd/ns1/img3@snap1
+rbd/ns1/img3@snap2
+rbd/ns2/img1
+rbd/ns2/img2
+rbd/ns2/img2@snap1
+rbd/ns2/img3
+rbd/ns2/img3@snap1
+rbd/ns2/img3@snap2
+custom/img1
+custom/img1@snap1
+custom/img2
+custom/img2@snap1
+custom/img2@snap2
+custom/img3
+custom/ns1/img1
+custom/ns1/img1@snap1
+custom/ns1/img2
+custom/ns1/img2@snap1
+custom/ns1/img2@snap2
+custom/ns1/img3
+custom/ns2/img1
+custom/ns2/img1@snap1
+custom/ns2/img2
+custom/ns2/img2@snap1
+custom/ns2/img2@snap2
+custom/ns2/img3
+)
+
+ceph osd pool create custom 8
+rbd pool init custom
+
+ceph osd set-require-min-compat-client nautilus
+rbd namespace create rbd/ns1
+rbd namespace create rbd/ns2
+rbd namespace create custom/ns1
+rbd namespace create custom/ns2
+
+# create in order, images before snapshots
+for spec in "${SPECS[@]}"; do
+ if [[ "$spec" =~ snap ]]; then
+ rbd snap create "$spec"
+ else
+ rbd create --size 10 "$spec"
+ DEV="$(sudo rbd map "$spec")"
+ sudo sfdisk "$DEV" <<EOF
+unit: sectors
+${DEV}p1 : start= 2048, size= 2, type=83
+${DEV}p2 : start= 4096, size= 2, type=83
+EOF
+ sudo rbd unmap "$DEV"
+ fi
+done
+
+[[ ! -e /dev/rbd ]]
+
+# map in random order
+COUNT=${#SPECS[@]}
+read -r -a INDEXES < <(python3 <<EOF
+import random
+l = list(range($COUNT))
+random.shuffle(l)
+print(*l)
+EOF
+)
+
+DEVS=()
+for idx in "${INDEXES[@]}"; do
+ DEVS+=("$(sudo rbd map "${SPECS[idx]}")")
+done
+
+[[ $(rbd showmapped | wc -l) -eq $((COUNT + 1)) ]]
+
+for ((i = 0; i < COUNT; i++)); do
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}")" == "${DEVS[i]}" ]]
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part1")" == "${DEVS[i]}p1" ]]
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part2")" == "${DEVS[i]}p2" ]]
+done
+
+for idx in "${INDEXES[@]}"; do
+ sudo rbd unmap "/dev/rbd/${SPECS[idx]}"
+done
+
+[[ ! -e /dev/rbd ]]
+
+# remove in reverse order, snapshots before images
+for ((i = COUNT - 1; i >= 0; i--)); do
+ if [[ "${SPECS[i]}" =~ snap ]]; then
+ rbd snap rm "${SPECS[i]}"
+ else
+ rbd rm "${SPECS[i]}"
+ fi
+done
+
+rbd namespace rm custom/ns2
+rbd namespace rm custom/ns1
+rbd namespace rm rbd/ns2
+rbd namespace rm rbd/ns1
+
+ceph osd pool delete custom custom --yes-i-really-really-mean-it
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_wac.sh b/qa/workunits/rbd/krbd_wac.sh
new file mode 100755
index 000000000..134460409
--- /dev/null
+++ b/qa/workunits/rbd/krbd_wac.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+set -ex
+
+wget http://download.ceph.com/qa/wac.c
+gcc -o wac wac.c
+
+rbd create --size 300 img
+DEV=$(sudo rbd map img)
+
+sudo mkfs.ext4 $DEV
+sudo mount $DEV /mnt
+set +e
+sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+sudo killall -w wac || true # wac forks
+sudo umount /mnt
+
+sudo wipefs -a $DEV
+sudo vgcreate vg_img $DEV
+sudo lvcreate -L 256M -n lv_img vg_img
+udevadm settle
+sudo mkfs.ext4 /dev/mapper/vg_img-lv_img
+sudo mount /dev/mapper/vg_img-lv_img /mnt
+set +e
+sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+sudo killall -w wac || true # wac forks
+sudo umount /mnt
+sudo vgremove -f vg_img
+sudo pvremove $DEV
+
+sudo rbd unmap $DEV
+rbd rm img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_watch_errors.sh b/qa/workunits/rbd/krbd_watch_errors.sh
new file mode 100755
index 000000000..f650d2a74
--- /dev/null
+++ b/qa/workunits/rbd/krbd_watch_errors.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+set -ex
+set -o pipefail
+
+function refresh_loop() {
+ local dev_id="$1"
+
+ set +x
+
+ local i
+ for ((i = 1; ; i++)); do
+ echo 1 | sudo tee "${SYSFS_DIR}/${dev_id}/refresh" > /dev/null
+ if ((i % 100 == 0)); then
+ echo "Refreshed ${i} times"
+ fi
+ done
+}
+
+readonly SYSFS_DIR="/sys/bus/rbd/devices"
+readonly IMAGE_NAME="watch-errors-test"
+
+rbd create -s 1G --image-feature exclusive-lock "${IMAGE_NAME}"
+
+# induce a watch error every 30 seconds: osdkeepalive=60 exceeds the OSD's
+# default 30s watch timeout (osd_client_watch_timeout)
+dev="$(sudo rbd device map -o osdkeepalive=60 "${IMAGE_NAME}")"
+dev_id="${dev#/dev/rbd}"
+
+# constantly refresh, not just on watch errors
+refresh_loop "${dev_id}" &
+refresh_pid=$!
+
+sudo dmesg -C
+
+# test that none of the above triggers a deadlock with a workload
+fio --name test --filename="${dev}" --ioengine=libaio --direct=1 \
+ --rw=randwrite --norandommap --randrepeat=0 --bs=512 --iodepth=128 \
+ --time_based --runtime=1h --eta=never
+
+num_errors="$(dmesg | grep -c "rbd${dev_id}: encountered watch error")"
+echo "Recorded ${num_errors} watch errors"
+
+kill "${refresh_pid}"
+wait
+
+sudo rbd device unmap "${dev}"
+
+if ((num_errors < 60)); then
+ echo "Too few watch errors"
+ exit 1
+fi
+
+echo OK
diff --git a/qa/workunits/rbd/luks-encryption.sh b/qa/workunits/rbd/luks-encryption.sh
new file mode 100755
index 000000000..5d3cc68cd
--- /dev/null
+++ b/qa/workunits/rbd/luks-encryption.sh
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+set -ex
+
+CEPH_ID=${CEPH_ID:-admin}
+TMP_FILES="/tmp/passphrase /tmp/passphrase2 /tmp/testdata1 /tmp/testdata2 /tmp/cmpdata"
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+ # Look for the command in the user path. If it fails run it as is,
+ # supposing it is in sudo path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+function drop_caches {
+ sudo sync
+ echo 3 | sudo tee /proc/sys/vm/drop_caches
+}
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function test_encryption_format() {
+ local format=$1
+ clean_up_cryptsetup
+
+ # format
+ rbd encryption format testimg $format /tmp/passphrase
+ drop_caches
+
+ # open encryption with cryptsetup
+ sudo cryptsetup open $RAW_DEV --type luks cryptsetupdev -d /tmp/passphrase
+ sudo chmod 666 /dev/mapper/cryptsetupdev
+
+ # open encryption with librbd
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg -t nbd -o encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+
+ # write via librbd && compare
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV oflag=direct bs=1M
+ dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=4M count=4
+ cmp -n 16MB /tmp/cmpdata /tmp/testdata1
+
+ # write via cryptsetup && compare
+ dd if=/tmp/testdata2 of=/dev/mapper/cryptsetupdev oflag=direct bs=1M
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=4M count=4
+ cmp -n 16MB /tmp/cmpdata /tmp/testdata2
+
+ # FIXME: encryption-aware flatten/resize misbehave if proxied to the
+ # RAW_DEV mapping (i.e. if the RAW_DEV mapping owns the lock)
+ # (acquire and) release the lock as a side effect
+ rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg
+
+ # check that encryption-aware resize compensates LUKS header overhead
+ (( $(sudo blockdev --getsize64 $LIBRBD_DEV) < (32 << 20) ))
+ expect_false rbd resize --size 32M testimg
+ rbd resize --size 32M --encryption-passphrase-file /tmp/passphrase testimg
+ (( $(sudo blockdev --getsize64 $LIBRBD_DEV) == (32 << 20) ))
+
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+}
+
+function test_clone_encryption() {
+ clean_up_cryptsetup
+
+ # write 1MB plaintext
+ dd if=/tmp/testdata1 of=$RAW_DEV oflag=direct bs=1M count=1
+
+ # clone (luks1)
+ rbd snap create testimg@snap
+ rbd snap protect testimg@snap
+ rbd clone testimg@snap testimg1
+ rbd encryption format testimg1 luks1 /tmp/passphrase
+
+ # open encryption with librbd, write one more MB, close
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg1 -t nbd -o encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=1
+ cmp -n 1MB /tmp/cmpdata /tmp/testdata1
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=1 skip=1 oflag=direct bs=1M count=1
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+
+ # second clone (luks2)
+ rbd snap create testimg1@snap
+ rbd snap protect testimg1@snap
+ rbd clone testimg1@snap testimg2
+ rbd encryption format testimg2 luks2 /tmp/passphrase2
+
+ # open encryption with librbd, write one more MB, close
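+ # encryption options are layered: the child's (luks2) spec is given
+ # first, the parent's (luks1) spec second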
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd -o encryption-format=luks2,encryption-passphrase-file=/tmp/passphrase2,encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=2
+ cmp -n 2MB /tmp/cmpdata /tmp/testdata1
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=2 skip=2 oflag=direct bs=1M count=1
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+
+ # flatten
+ expect_false rbd flatten testimg2 --encryption-format luks1 --encryption-format luks2 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+ rbd flatten testimg2 --encryption-format luks2 --encryption-format luks1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+
+ # verify with cryptsetup
+ RAW_FLAT_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd)
+ sudo cryptsetup open $RAW_FLAT_DEV --type luks cryptsetupdev -d /tmp/passphrase2
+ sudo chmod 666 /dev/mapper/cryptsetupdev
+ dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=1M count=3
+ cmp -n 3MB /tmp/cmpdata /tmp/testdata1
+ _sudo rbd device unmap -t nbd $RAW_FLAT_DEV
+}
+
+function test_clone_and_load_with_a_single_passphrase {
+ local expectedfail=$1
+
+ # clone and format
+ rbd snap create testimg@snap
+ rbd snap protect testimg@snap
+ rbd clone testimg@snap testimg1
+ rbd encryption format testimg1 luks2 /tmp/passphrase2
+
+ if [ "$expectedfail" = "true" ]
+ then
+ expect_false rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
+ rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+ else
+ rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
+ fi
+
+ rbd remove testimg1
+ rbd snap unprotect testimg@snap
+ rbd snap remove testimg@snap
+}
+
+function test_plaintext_detection {
+ # 16k LUKS header
+ sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 16k $RAW_DEV /tmp/passphrase
+ test_clone_and_load_with_a_single_passphrase true
+
+ # 4m LUKS header
+ sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 4m $RAW_DEV /tmp/passphrase
+ test_clone_and_load_with_a_single_passphrase true
+
+ # no LUKS header
+ dd if=/dev/zero of=$RAW_DEV oflag=direct bs=4M count=8
+ test_clone_and_load_with_a_single_passphrase false
+}
+
+function get_nbd_device_paths {
+ rbd device list -t nbd | tail -n +2 | egrep "\s+rbd\s+testimg" | awk '{print $5;}'
+}
+
+function clean_up_cryptsetup() {
+ ls /dev/mapper/cryptsetupdev && sudo cryptsetup close cryptsetupdev || true
+}
+
+function clean_up {
+ sudo rm -f $TMP_FILES
+ clean_up_cryptsetup
+ for device in $(get_nbd_device_paths); do
+ _sudo rbd device unmap -t nbd $device
+ done
+
+ rbd remove testimg2 || true
+ rbd snap unprotect testimg1@snap || true
+ rbd snap remove testimg1@snap || true
+ rbd remove testimg1 || true
+ rbd snap unprotect testimg@snap || true
+ rbd snap remove testimg@snap || true
+ rbd remove testimg || true
+}
+
+if [[ $(uname) != "Linux" ]]; then
+ echo "LUKS encryption tests only supported on Linux"
+ exit 0
+fi
+
+
+if [[ $(($(ceph-conf --name client.${CEPH_ID} --show-config-value rbd_default_features) & 64)) != 0 ]]; then
+ echo "LUKS encryption tests not supported alongside image journaling feature"
+ exit 0
+fi
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+# generate test data
+dd if=/dev/urandom of=/tmp/testdata1 bs=4M count=4
+dd if=/dev/urandom of=/tmp/testdata2 bs=4M count=4
+
+# create passphrase files
+printf "pass\0word\n" > /tmp/passphrase
+printf "\t password2 " > /tmp/passphrase2
+
+# create an image
+rbd create testimg --size=32M
+
+# map raw data to nbd device
+RAW_DEV=$(_sudo rbd -p rbd map testimg -t nbd)
+sudo chmod 666 $RAW_DEV
+
+test_plaintext_detection
+
+test_encryption_format luks1
+test_encryption_format luks2
+
+test_clone_encryption
+
+echo OK
diff --git a/qa/workunits/rbd/map-snapshot-io.sh b/qa/workunits/rbd/map-snapshot-io.sh
new file mode 100755
index 000000000..a69d84829
--- /dev/null
+++ b/qa/workunits/rbd/map-snapshot-io.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# http://tracker.ceph.com/issues/3964
+
+set -ex
+
+rbd create image -s 100
+DEV=$(sudo rbd map image)
+dd if=/dev/zero of=$DEV oflag=direct count=10
+rbd snap create image@s1
+dd if=/dev/zero of=$DEV oflag=direct count=10 # used to fail
+rbd snap rm image@s1
+dd if=/dev/zero of=$DEV oflag=direct count=10
+sudo rbd unmap $DEV
+rbd rm image
+
+echo OK
diff --git a/qa/workunits/rbd/map-unmap.sh b/qa/workunits/rbd/map-unmap.sh
new file mode 100755
index 000000000..99863849e
--- /dev/null
+++ b/qa/workunits/rbd/map-unmap.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+set -ex
+
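+# Usage: map-unmap.sh [seconds]
+# Repeatedly map and unmap an image for approximately the given duration
+# (default 300 seconds).
+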
+RUN_TIME=300 # approximate duration of run (seconds)
+
+[ $# -eq 1 ] && RUN_TIME="$1"
+
+IMAGE_NAME="image-$$"
+IMAGE_SIZE="1024" # MB
+
+function get_time() {
+ date '+%s'
+}
+
+function times_up() {
+ local end_time="$1"
+
+ test $(get_time) -ge "${end_time}"
+}
+
+function map_unmap() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ local dev
+ dev="$(sudo rbd map "${image_name}")"
+ sudo rbd unmap "${dev}"
+}
+
+#### Start
+
+rbd create "${IMAGE_NAME}" --size="${IMAGE_SIZE}"
+
+COUNT=0
+START_TIME=$(get_time)
+END_TIME=$(expr $(get_time) + ${RUN_TIME})
+while ! times_up "${END_TIME}"; do
+ map_unmap "${IMAGE_NAME}"
+ COUNT=$(expr $COUNT + 1)
+done
+ELAPSED=$(expr "$(get_time)" - "${START_TIME}")
+
+rbd rm "${IMAGE_NAME}"
+
+echo "${COUNT} iterations completed in ${ELAPSED} seconds"
diff --git a/qa/workunits/rbd/merge_diff.sh b/qa/workunits/rbd/merge_diff.sh
new file mode 100755
index 000000000..eb8597304
--- /dev/null
+++ b/qa/workunits/rbd/merge_diff.sh
@@ -0,0 +1,477 @@
+#!/usr/bin/env bash
+set -ex
+
+export RBD_FORCE_ALLOW_V1=1
+
+pool=rbd
+gen=$pool/gen
+out=$pool/out
+testno=1
+
+mkdir -p merge_diff_test
+pushd merge_diff_test
+
+function expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+function clear_all()
+{
+ fusermount -u mnt || true
+
+ rbd snap purge --no-progress $gen || true
+ rbd rm --no-progress $gen || true
+ rbd snap purge --no-progress $out || true
+ rbd rm --no-progress $out || true
+
+ rm -rf diffs || true
+}
+
+function rebuild()
+{
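+ # args: <object-size> <stripe-unit> <stripe-count> <image-format>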
+ clear_all
+ echo Starting test $testno
+ ((testno++))
+ if [[ "$2" -lt "$1" ]] && [[ "$3" -gt "1" ]]; then
+ rbd create $gen --size 100 --object-size $1 --stripe-unit $2 --stripe-count $3 --image-format $4
+ else
+ rbd create $gen --size 100 --object-size $1 --image-format $4
+ fi
+ rbd create $out --size 1 --object-size 524288
+ mkdir -p mnt diffs
+ # lttng has atexit handlers that need to be fork/clone aware
+ LD_PRELOAD=liblttng-ust-fork.so.0 rbd-fuse -p $pool mnt
+}
+
+function write()
+{
+ dd if=/dev/urandom of=mnt/gen bs=1M conv=notrunc seek=$1 count=$2
+}
+
+function snap()
+{
+ rbd snap create $gen@$1
+}
+
+function resize()
+{
+ rbd resize --no-progress $gen --size $1 --allow-shrink
+}
+
+function export_diff()
+{
+ if [ $2 == "head" ]; then
+ target="$gen"
+ else
+ target="$gen@$2"
+ fi
+ if [ $1 == "null" ]; then
+ rbd export-diff --no-progress $target diffs/$1.$2
+ else
+ rbd export-diff --no-progress $target --from-snap $1 diffs/$1.$2
+ fi
+}
+
+function merge_diff()
+{
+ rbd merge-diff diffs/$1.$2 diffs/$2.$3 diffs/$1.$3
+}
+
+function check()
+{
+ rbd import-diff --no-progress diffs/$1.$2 $out || return 1
+ if [ "$2" == "head" ]; then
+ sum1=`rbd export $gen - | md5sum`
+ else
+ sum1=`rbd export $gen@$2 - | md5sum`
+ fi
+ sum2=`rbd export $out - | md5sum`
+ if [ "$sum1" != "$sum2" ]; then
+ exit 1
+ fi
+ if [ "$2" != "head" ]; then
+ rbd snap ls $out | awk '{print $2}' | grep "^$2\$" || return 1
+ fi
+}
+
+# test handling of the from/to snapshots in the diff header
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+export_diff null a
+export_diff a head
+merge_diff null a head
+check null head
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+merge_diff null a b
+check null b
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+merge_diff a b head
+check null a
+check a head
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+rbd merge-diff diffs/null.a diffs/a.b - | rbd merge-diff - diffs/b.head - > diffs/null.head
+check null head
+
+#data test
+rebuild 4194304 4194304 1 2
+write 4 2
+snap s101
+write 0 3
+write 8 2
+snap s102
+export_diff null s101
+export_diff s101 s102
+merge_diff null s101 s102
+check null s102
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 2 5
+write 8 2
+snap s201
+write 0 2
+write 6 3
+snap s202
+export_diff null s201
+export_diff s201 s202
+merge_diff null s201 s202
+check null s202
+
+rebuild 4194304 4194304 1 2
+write 0 4
+write 12 6
+snap s301
+write 0 6
+write 10 5
+write 16 4
+snap s302
+export_diff null s301
+export_diff s301 s302
+merge_diff null s301 s302
+check null s302
+
+rebuild 4194304 4194304 1 2
+write 0 12
+write 14 2
+write 18 2
+snap s401
+write 1 2
+write 5 6
+write 13 3
+write 18 2
+snap s402
+export_diff null s401
+export_diff s401 s402
+merge_diff null s401 s402
+check null s402
+
+rebuild 4194304 4194304 1 2
+write 2 4
+write 10 12
+write 27 6
+write 36 4
+snap s501
+write 0 24
+write 28 4
+write 36 4
+snap s502
+export_diff null s501
+export_diff s501 s502
+merge_diff null s501 s502
+check null s502
+
+rebuild 4194304 4194304 1 2
+write 0 8
+resize 5
+snap r1
+resize 20
+write 12 8
+snap r2
+resize 8
+write 4 4
+snap r3
+export_diff null r1
+export_diff r1 r2
+export_diff r2 r3
+merge_diff null r1 r2
+merge_diff null r2 r3
+check null r3
+
+rebuild 4194304 4194304 1 2
+write 0 8
+resize 5
+snap r1
+resize 20
+write 12 8
+snap r2
+resize 8
+write 4 4
+snap r3
+resize 10
+snap r4
+export_diff null r1
+export_diff r1 r2
+export_diff r2 r3
+export_diff r3 r4
+merge_diff null r1 r2
+merge_diff null r2 r3
+merge_diff null r3 r4
+check null r4
+
+# merge diff doesn't yet support fancy striping
+# rebuild 4194304 65536 8 2
+# write 0 32
+# snap r1
+# write 16 32
+# snap r2
+# export_diff null r1
+# export_diff r1 r2
+# expect_false merge_diff null r1 r2
+
+rebuild 4194304 4194304 1 2
+write 0 1
+write 2 1
+write 4 1
+write 6 1
+snap s1
+write 1 1
+write 3 1
+write 5 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 1 1
+write 3 1
+write 5 1
+snap s1
+write 0 1
+write 2 1
+write 4 1
+write 6 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 1 1
+write 7 1
+write 13 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 0 1
+write 6 1
+write 12 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 2 1
+write 8 1
+write 14 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 1 1
+write 7 1
+write 13 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 1
+write 6 1
+write 12 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 2 1
+write 8 1
+write 14 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 2 4
+write 8 4
+write 14 4
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 4
+write 6 4
+write 12 4
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 6
+write 6 6
+write 12 6
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 3 6
+write 9 6
+write 15 6
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 8
+snap s1
+resize 2
+resize 100
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 8
+snap s1
+resize 2
+resize 100
+snap s2
+write 20 2
+snap s3
+export_diff null s1
+export_diff s1 s2
+export_diff s2 s3
+merge_diff s1 s2 s3
+check null s1
+check s1 s3
+
+#addme
+
+clear_all
+popd
+rm -rf merge_diff_test
+
+echo OK
diff --git a/qa/workunits/rbd/notify_master.sh b/qa/workunits/rbd/notify_master.sh
new file mode 100755
index 000000000..99ccd74db
--- /dev/null
+++ b/qa/workunits/rbd/notify_master.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/librbd
+python3 $relpath/test_notify.py master
+exit 0
diff --git a/qa/workunits/rbd/notify_slave.sh b/qa/workunits/rbd/notify_slave.sh
new file mode 100755
index 000000000..7f49a0c7d
--- /dev/null
+++ b/qa/workunits/rbd/notify_slave.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/librbd
+python3 $relpath/test_notify.py slave
+exit 0
diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
new file mode 100755
index 000000000..f8a9aaa71
--- /dev/null
+++ b/qa/workunits/rbd/permissions.sh
@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+set -ex
+
+IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
+
+clone_v2_enabled() {
+ image_spec=$1
+ rbd info $image_spec | grep "clone-parent"
+}
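+# clone_v2_enabled keys off the "clone-parent" op feature that `rbd info`
+# is assumed to report on a clone v2 parent; with clone v1 the parent
+# snapshot must stay protected instead, which is what the branches below
+# exercise.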
+
+create_pools() {
+ ceph osd pool create images 32
+ rbd pool init images
+ ceph osd pool create volumes 32
+ rbd pool init volumes
+}
+
+delete_pools() {
+ (ceph osd pool delete images images --yes-i-really-really-mean-it || true) >/dev/null 2>&1
+ (ceph osd pool delete volumes volumes --yes-i-really-really-mean-it || true) >/dev/null 2>&1
+}
+
+recreate_pools() {
+ delete_pools
+ create_pools
+}
+
+delete_users() {
+ (ceph auth del client.volumes || true) >/dev/null 2>&1
+ (ceph auth del client.images || true) >/dev/null 2>&1
+
+ (ceph auth del client.snap_none || true) >/dev/null 2>&1
+ (ceph auth del client.snap_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_pool || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
+
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
+}
+
+create_users() {
+ ceph auth get-or-create client.volumes \
+ mon 'profile rbd' \
+ osd 'profile rbd pool=volumes, profile rbd-read-only pool=images' \
+ mgr 'profile rbd pool=volumes, profile rbd-read-only pool=images' >> $KEYRING
+ ceph auth get-or-create client.images mon 'profile rbd' osd 'profile rbd pool=images' >> $KEYRING
+
+ ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
+ ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
+
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+}
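+# For debugging, the granted caps can be inspected with e.g.
+# `ceph auth get client.volumes`; the 'profile rbd' mon/osd profiles expand
+# to the minimal caps rbd clients need (the exact expansion varies by
+# release).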
+
+expect() {
+
+ set +e
+
+ local expected_ret=$1
+ local ret
+
+ shift
+ cmd=$@
+
+ eval $cmd
+ ret=$?
+
+ set -e
+
+ if [[ $ret -ne $expected_ret ]]; then
+        echo "ERROR: running '$cmd': expected $expected_ret got $ret"
+ return 1
+ fi
+
+ return 0
+}
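+# Minimal usage sketch (commented out, illustrative only):
+#   expect 0 true
+#   expect 1 false
+# rbd exits with errno-style codes, so below 16 corresponds to EBUSY,
+# 39 to ENOTEMPTY and 13 to EACCES.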
+
+test_images_access() {
+ rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
+ rbd -k $KEYRING --id images snap create images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+ rbd -k $KEYRING --id images export images/foo@snap - >/dev/null
+ expect 16 rbd -k $KEYRING --id images snap rm images/foo@snap
+
+ rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
+
+ if ! clone_v2_enabled images/foo; then
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ fi
+
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ expect 1 rbd -k $KEYRING --id images flatten volumes/child
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+
+ expect 39 rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id images snap rm images/foo@snap
+ rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id volumes rm volumes/child
+}
+
+test_volumes_access() {
+ rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
+ rbd -k $KEYRING --id images snap create images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+
+ # commands that work with read-only access
+ rbd -k $KEYRING --id volumes info images/foo@snap
+ rbd -k $KEYRING --id volumes snap ls images/foo
+ rbd -k $KEYRING --id volumes export images/foo - >/dev/null
+ rbd -k $KEYRING --id volumes cp images/foo volumes/foo_copy
+ rbd -k $KEYRING --id volumes rm volumes/foo_copy
+ rbd -k $KEYRING --id volumes children images/foo@snap
+ rbd -k $KEYRING --id volumes lock list images/foo
+
+ # commands that fail with read-only access
+ expect 1 rbd -k $KEYRING --id volumes resize -s 2 images/foo --allow-shrink
+ expect 1 rbd -k $KEYRING --id volumes snap create images/foo@2
+ expect 1 rbd -k $KEYRING --id volumes snap rollback images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes snap remove images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes snap purge images/foo
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes flatten images/foo
+ expect 1 rbd -k $KEYRING --id volumes lock add images/foo test
+ expect 1 rbd -k $KEYRING --id volumes lock remove images/foo test locker
+ expect 1 rbd -k $KEYRING --id volumes ls rbd
+
+ # create clone and snapshot
+ rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
+ rbd -k $KEYRING --id volumes snap create volumes/child@snap1
+ rbd -k $KEYRING --id volumes snap protect volumes/child@snap1
+ rbd -k $KEYRING --id volumes snap create volumes/child@snap2
+
+ # make sure original snapshot stays protected
+ if clone_v2_enabled images/foo; then
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
+ else
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ expect 2 rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ fi
+
+ # clean up
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap1
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap rm images/foo@snap
+ rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id volumes rm volumes/child
+}
+
+create_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+with rados.Rados(conffile="", rados_id="${ID}") as cluster:
+ ioctx = cluster.open_ioctx("${POOL}")
+
+ snap_id = ioctx.create_self_managed_snap()
+    print("Created snap id {}".format(snap_id))
+EOF
+}
+
+remove_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+with rados.Rados(conffile="", rados_id="mon_write") as cluster1, \
+ rados.Rados(conffile="", rados_id="${ID}") as cluster2:
+ ioctx1 = cluster1.open_ioctx("${POOL}")
+
+ snap_id = ioctx1.create_self_managed_snap()
+    print("Created snap id {}".format(snap_id))
+
+ ioctx2 = cluster2.open_ioctx("${POOL}")
+
+ ioctx2.remove_self_managed_snap(snap_id)
+    print("Removed snap id {}".format(snap_id))
+EOF
+}
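+# Note: the removal helper needs an existing snap to operate on, so it first
+# creates one as the unrestricted 'mon_write' user and only then attempts
+# the removal as the user under test.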
+
+test_remove_self_managed_snapshots() {
+ # Ensure users cannot create self-managed snapshots w/o permissions
+ expect 1 create_self_managed_snapshot snap_none images
+ expect 1 create_self_managed_snapshot snap_none volumes
+
+ create_self_managed_snapshot snap_all images
+ create_self_managed_snapshot snap_all volumes
+
+ create_self_managed_snapshot snap_pool images
+ expect 1 create_self_managed_snapshot snap_pool volumes
+
+ create_self_managed_snapshot snap_profile_all images
+ create_self_managed_snapshot snap_profile_all volumes
+
+ create_self_managed_snapshot snap_profile_pool images
+ expect 1 create_self_managed_snapshot snap_profile_pool volumes
+
+ # Ensure users cannot delete self-managed snapshots w/o permissions
+ expect 1 remove_self_managed_snapshot snap_none images
+ expect 1 remove_self_managed_snapshot snap_none volumes
+
+ remove_self_managed_snapshot snap_all images
+ remove_self_managed_snapshot snap_all volumes
+
+ remove_self_managed_snapshot snap_pool images
+ expect 1 remove_self_managed_snapshot snap_pool volumes
+
+ remove_self_managed_snapshot snap_profile_all images
+ remove_self_managed_snapshot snap_profile_all volumes
+
+ remove_self_managed_snapshot snap_profile_pool images
+ expect 1 remove_self_managed_snapshot snap_profile_pool volumes
+}
+
+test_rbd_support() {
+ # read-only commands should work on both pools
+ ceph -k $KEYRING --id volumes rbd perf image stats volumes
+ ceph -k $KEYRING --id volumes rbd perf image stats images
+
+ # read/write commands should only work on 'volumes'
+ rbd -k $KEYRING --id volumes create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 volumes/foo
+ ceph -k $KEYRING --id volumes rbd task add remove volumes/foo
+ expect 13 ceph -k $KEYRING --id volumes rbd task add remove images/foo
+}
+
+cleanup() {
+ rm -f $KEYRING
+}
+
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+delete_users
+create_users
+
+recreate_pools
+test_images_access
+
+recreate_pools
+test_volumes_access
+
+test_remove_self_managed_snapshots
+
+test_rbd_support
+
+delete_pools
+delete_users
+
+echo OK
+exit 0
diff --git a/qa/workunits/rbd/qemu-iotests.sh b/qa/workunits/rbd/qemu-iotests.sh
new file mode 100755
index 000000000..a2e9e0600
--- /dev/null
+++ b/qa/workunits/rbd/qemu-iotests.sh
@@ -0,0 +1,47 @@
+#!/bin/sh -ex
+
+# Run qemu-iotests against rbd. These are block-level tests that go
+# through qemu but do not involve running a full vm. Note that these
+# require the admin ceph user, as there's no way to pass the ceph user
+# to qemu-iotests currently.
+
+testlist='001 002 003 004 005 008 009 010 011 021 025 032 033'
+
+git clone https://github.com/qemu/qemu.git
+cd qemu
+
+
+if grep -iqE '(bionic|focal|jammy|platform:el9)' /etc/os-release; then
+ git checkout v2.11.0
+elif grep -iqE '(xenial|platform:el8)' /etc/os-release; then
+ git checkout v2.3.0
+else
+    # use v2.2.0-rc3 (last released version that handles all the tests)
+ git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
+fi
+
+cd tests/qemu-iotests
+# qemu-iotests expects a binary called just 'qemu' to be available
+if [ -x '/usr/bin/qemu-system-x86_64' ]
+then
+ QEMU='/usr/bin/qemu-system-x86_64'
+else
+ QEMU='/usr/libexec/qemu-kvm'
+fi
+
+# Bionic (v2.11.0) tests expect all tools in the current directory
+ln -s $QEMU qemu
+ln -s /usr/bin/qemu-img
+ln -s /usr/bin/qemu-io
+ln -s /usr/bin/qemu-nbd
+
+# This is normally generated by configure, but contains nothing except a
+# python binary definition, which we don't care about. For some reason it
+# is not present on trusty.
+touch common.env
+
+# TEST_DIR is the pool for rbd
+TEST_DIR=rbd ./check -rbd $testlist
+
+cd ../../..
+rm -rf qemu
diff --git a/qa/workunits/rbd/qemu_dynamic_features.sh b/qa/workunits/rbd/qemu_dynamic_features.sh
new file mode 100755
index 000000000..70e9fbb3c
--- /dev/null
+++ b/qa/workunits/rbd/qemu_dynamic_features.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+set -x
+
+if [[ -z "${IMAGE_NAME}" ]]; then
+ echo image name must be provided
+ exit 1
+fi
+
+is_qemu_running() {
+ rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
+}
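+# is_qemu_running succeeds when `rbd status` prints any line other than
+# "Watchers: none", i.e. at least one watcher (QEMU) is registered.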
+
+wait_for_qemu() {
+ while ! is_qemu_running ; do
+ echo "*** Waiting for QEMU"
+ sleep 30
+ done
+}
+
+wait_for_qemu
+rbd feature disable ${IMAGE_NAME} journaling
+rbd feature disable ${IMAGE_NAME} object-map
+rbd feature disable ${IMAGE_NAME} exclusive-lock
+
+while is_qemu_running ; do
+ echo "*** Enabling all features"
+ rbd feature enable ${IMAGE_NAME} exclusive-lock || break
+ rbd feature enable ${IMAGE_NAME} journaling || break
+ rbd feature enable ${IMAGE_NAME} object-map || break
+ if is_qemu_running ; then
+ sleep 60
+ fi
+
+ echo "*** Disabling all features"
+ rbd feature disable ${IMAGE_NAME} journaling || break
+ rbd feature disable ${IMAGE_NAME} object-map || break
+ rbd feature disable ${IMAGE_NAME} exclusive-lock || break
+ if is_qemu_running ; then
+ sleep 60
+ fi
+done
+
+if is_qemu_running ; then
+  echo "RBD command failed while QEMU was still running"
+ exit 1
+fi
diff --git a/qa/workunits/rbd/qemu_rebuild_object_map.sh b/qa/workunits/rbd/qemu_rebuild_object_map.sh
new file mode 100755
index 000000000..2647dcdcd
--- /dev/null
+++ b/qa/workunits/rbd/qemu_rebuild_object_map.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+set -ex
+
+if [[ -z "${IMAGE_NAME}" ]]; then
+ echo image name must be provided
+ exit 1
+fi
+
+is_qemu_running() {
+ rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
+}
+
+wait_for_qemu() {
+ while ! is_qemu_running ; do
+ echo "*** Waiting for QEMU"
+ sleep 30
+ done
+}
+
+wait_for_qemu
+rbd feature disable ${IMAGE_NAME} journaling || true
+rbd feature disable ${IMAGE_NAME} fast-diff || true
+rbd feature disable ${IMAGE_NAME} object-map || true
+rbd feature disable ${IMAGE_NAME} exclusive-lock || true
+
+rbd feature enable ${IMAGE_NAME} exclusive-lock
+rbd feature enable ${IMAGE_NAME} object-map
+
+while is_qemu_running ; do
+ echo "*** Rebuilding object map"
+ rbd object-map rebuild ${IMAGE_NAME}
+
+ if is_qemu_running ; then
+ sleep 60
+ fi
+done
+
diff --git a/qa/workunits/rbd/qos.sh b/qa/workunits/rbd/qos.sh
new file mode 100755
index 000000000..feb1d5144
--- /dev/null
+++ b/qa/workunits/rbd/qos.sh
@@ -0,0 +1,90 @@
+#!/bin/sh -ex
+
+POOL=rbd
+IMAGE=test$$
+IMAGE_SIZE=1G
+TOLERANCE_PRCNT=10
+
+rbd_bench() {
+ local image=$1
+ local type=$2
+ local total=$3
+ local qos_type=$4
+ local qos_limit=$5
+ local iops_var_name=$6
+ local bps_var_name=$7
+ local timeout=$8
+ local timeout_cmd=""
+
+ if [ -n "${timeout}" ]; then
+ timeout_cmd="timeout --preserve-status ${timeout}"
+ fi
+
+ # parse `rbd bench` output for string like this:
+ # elapsed: 25 ops: 2560 ops/sec: 100.08 bytes/sec: 409.13 MiB
+ iops_bps=$(${timeout_cmd} rbd bench "${image}" \
+ --io-type ${type} --io-size 4K \
+ --io-total ${total} --rbd-cache=false \
+ --rbd_qos_${qos_type}_limit ${qos_limit} |
+ awk '/elapsed:.* GiB/ {print int($6) ":" int($8) * 1024 * 1024 * 1024}
+ /elapsed:.* MiB/ {print int($6) ":" int($8) * 1024 * 1024}
+ /elapsed:.* KiB/ {print int($6) ":" int($8) * 1024}
+ /elapsed:.* B/ {print int($6) ":" int($8)}')
+ eval ${iops_var_name}=${iops_bps%:*}
+ eval ${bps_var_name}=${iops_bps#*:}
+}
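+# Usage sketch (hypothetical values): after
+#   rbd_bench "rbd/img" write 1G iops 100 iops bps 60
+# the variables named by the 6th and 7th arguments (${iops} and ${bps} here)
+# hold the measured rates, with the run capped at 100 IOPS and aborted
+# after 60 seconds.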
+
+rbd create "${POOL}/${IMAGE}" -s ${IMAGE_SIZE}
+rbd bench "${POOL}/${IMAGE}" --io-type write --io-size 4M --io-total ${IMAGE_SIZE}
+
+rbd_bench "${POOL}/${IMAGE}" write ${IMAGE_SIZE} iops 0 iops bps 60
+iops_unlimited=$iops
+bps_unlimited=$bps
+
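+# Skip the QoS checks entirely if the cluster is too slow to produce
+# meaningful unlimited-rate numbers.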
+test "${iops_unlimited}" -ge 20 || exit 0
+
+io_total=$((bps_unlimited * 30))
+
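+# Each limited run must stay within TOLERANCE_PRCNT of the configured limit;
+# e.g. with iops_unlimited=200 and a 10% tolerance the allowed ceiling is
+# 200 / 2 * 110 / 100 = 110 IOPS.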
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops 0 iops bps
+iops_unlimited=$iops
+bps_unlimited=$bps
+
+test "${iops_unlimited}" -ge 20 || exit 0
+
+io_total=$((bps_unlimited * 30))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+# test a config override is applied
+rbd config image set "${POOL}/${IMAGE}" rbd_qos_iops_limit $((iops_unlimited / 4))
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 4 * (100 + TOLERANCE_PRCNT) / 100))
+rbd config image remove "${POOL}/${IMAGE}" rbd_qos_iops_limit
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd rm "${POOL}/${IMAGE}"
+
+echo OK
diff --git a/qa/workunits/rbd/rbd-ggate.sh b/qa/workunits/rbd/rbd-ggate.sh
new file mode 100755
index 000000000..1bf89da38
--- /dev/null
+++ b/qa/workunits/rbd/rbd-ggate.sh
@@ -0,0 +1,239 @@
+#!/bin/sh -ex
+
+POOL=testrbdggate$$
+NS=ns
+IMAGE=test
+SIZE=64
+DATA=
+DEV=
+
+if which xmlstarlet > /dev/null 2>&1; then
+ XMLSTARLET=xmlstarlet
+elif which xml > /dev/null 2>&1; then
+ XMLSTARLET=xml
+else
+ echo "Missing xmlstarlet binary!"
+ exit 1
+fi
+
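+# `uname -K` prints the FreeBSD kernel's __FreeBSD_version; 1200078 is
+# assumed to be the first revision whose geom_gate supports online resize.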
+if [ `uname -K` -ge 1200078 ] ; then
+ RBD_GGATE_RESIZE_SUPPORTED=1
+fi
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+    # Look for the command in the user's path. If that fails, run it as is,
+    # assuming it is in sudo's path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+check_geom_gate()
+{
+    # See if geom_gate is loaded, or can be loaded.
+    # Otherwise the tests cannot run.
+ if ! kldstat -q -n geom_gate ; then
+ # See if we can load it
+ if ! _sudo kldload geom_gate ; then
+ echo Not able to load geom_gate
+ echo check /var/log/messages as to why
+ exit 1
+ fi
+ fi
+}
+
+setup()
+{
+ local ns x
+
+ if [ -e CMakeCache.txt ]; then
+ # running under cmake build dir
+
+ CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=${CEPH_ROOT}/bin
+
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+ PATH=${CEPH_BIN}:${PATH}
+ fi
+
+ _sudo echo test sudo
+ check_geom_gate
+
+ trap cleanup INT TERM EXIT
+ TEMPDIR=`mktemp -d`
+ DATA=${TEMPDIR}/data
+ dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+ ceph osd pool create ${POOL} 32
+
+ rbd namespace create ${POOL}/${NS}
+ for ns in '' ${NS}; do
+ rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
+ ${DATA} ${IMAGE}
+ done
+}
+
+cleanup()
+{
+ local ns s
+
+ set +e
+ rm -Rf ${TEMPDIR}
+ if [ -n "${DEV}" ]
+ then
+ _sudo rbd-ggate unmap ${DEV}
+ fi
+
+ ceph osd pool delete ${POOL} ${POOL} --yes-i-really-really-mean-it
+}
+
+expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+#
+# main
+#
+
+setup
+
+echo exit status test
+expect_false rbd-ggate
+expect_false rbd-ggate INVALIDCMD
+if [ `id -u` -ne 0 ]
+then
+ expect_false rbd-ggate map ${IMAGE}
+fi
+expect_false _sudo rbd-ggate map INVALIDIMAGE
+
+echo map test using the first unused device
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+
+echo map test specifying the device
+expect_false _sudo rbd-ggate --device ${DEV} map ${POOL}/${IMAGE}
+dev1=${DEV}
+_sudo rbd-ggate unmap ${DEV}
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+# XXX: race possible when the device is reused by another process
+DEV=`_sudo rbd-ggate --device ${dev1} map ${POOL}/${IMAGE}`
+[ "${DEV}" = "${dev1}" ]
+rbd-ggate list | grep " ${DEV} *$"
+
+echo list format test
+expect_false _sudo rbd-ggate --format INVALID list
+rbd-ggate --format json --pretty-format list
+rbd-ggate --format xml list
+
+echo read test
+[ "`dd if=${DATA} bs=1M | md5`" = "`_sudo dd if=${DEV} bs=1M | md5`" ]
+
+echo write test
+dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+_sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo sync
+[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+
+echo trim test
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+_sudo newfs -E ${DEV}
+_sudo sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -lt "${provisioned}" ]
+
+echo resize test
+devname=$(basename ${DEV})
+size=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+test -n "${size}"
+rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
+rbd info ${POOL}/${IMAGE}
+if [ -z "$RBD_GGATE_RESIZE_SUPPORTED" ]; then
+    # When resizing is not supported, resizing the underlying image of a
+    # GEOM ggate device stops the ggate process servicing it, so after the
+    # resize we test that the device has disappeared.
+ rbd-ggate list | expect_false grep " ${DEV} *$"
+else
+ rbd-ggate list | grep " ${DEV} *$"
+ size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+ test -n "${size2}"
+ test ${size2} -eq $((size * 2))
+ dd if=/dev/urandom of=${DATA} bs=1M count=$((SIZE * 2))
+ _sudo dd if=${DATA} of=${DEV} bs=1M
+ _sudo sync
+ [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+ rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
+ rbd info ${POOL}/${IMAGE}
+ size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+ test -n "${size2}"
+ test ${size2} -eq ${size}
+ truncate -s ${SIZE}M ${DATA}
+ [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+ _sudo rbd-ggate unmap ${DEV}
+fi
+DEV=
+
+echo read-only option test
+DEV=`_sudo rbd-ggate map --read-only ${POOL}/${IMAGE}`
+devname=$(basename ${DEV})
+rbd-ggate list | grep " ${DEV} *$"
+access=$(geom gate list ${devname} | awk '$1 == "access:" {print $2}')
+test "${access}" = "read-only"
+_sudo dd if=${DEV} of=/dev/null bs=1M
+expect_false _sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo rbd-ggate unmap ${DEV}
+
+echo exclusive option test
+DEV=`_sudo rbd-ggate map --exclusive ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo sync
+expect_false timeout 10 \
+ rbd -p ${POOL} bench ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
+_sudo rbd-ggate unmap ${DEV}
+DEV=
+rbd bench -p ${POOL} ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
+
+echo unmap by image name test
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${IMAGE}"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo map/unmap snap test
+rbd snap create ${POOL}/${IMAGE}@snap
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}@snap`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${IMAGE}@snap"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo map/unmap namespace test
+rbd snap create ${POOL}/${NS}/${IMAGE}@snap
+DEV=`_sudo rbd-ggate map ${POOL}/${NS}/${IMAGE}@snap`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${NS}/${IMAGE}@snap"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo OK
diff --git a/qa/workunits/rbd/rbd-nbd.sh b/qa/workunits/rbd/rbd-nbd.sh
new file mode 100755
index 000000000..bc89e9be5
--- /dev/null
+++ b/qa/workunits/rbd/rbd-nbd.sh
@@ -0,0 +1,500 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+POOL=rbd
+ANOTHER_POOL=new_default_pool$$
+NS=ns
+IMAGE=testrbdnbd$$
+SIZE=64
+DATA=
+DEV=
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+    # Look for the command in the user's path. If that fails, run it as is,
+    # assuming it is in sudo's path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+setup()
+{
+ local ns x
+
+ if [ -e CMakeCache.txt ]; then
+ # running under cmake build dir
+
+ CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=${CEPH_ROOT}/bin
+
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+ PATH=${CEPH_BIN}:${PATH}
+ fi
+
+ _sudo echo test sudo
+
+ trap cleanup INT TERM EXIT
+ TEMPDIR=`mktemp -d`
+ DATA=${TEMPDIR}/data
+ dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+
+ rbd namespace create ${POOL}/${NS}
+
+ for ns in '' ${NS}; do
+ rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
+ ${DATA} ${IMAGE}
+ done
+
+ # create another pool
+ ceph osd pool create ${ANOTHER_POOL} 8
+ rbd pool init ${ANOTHER_POOL}
+}
+
+function cleanup()
+{
+ local ns s
+
+ set +e
+
+ mount | fgrep ${TEMPDIR}/mnt && _sudo umount -f ${TEMPDIR}/mnt
+
+ rm -Rf ${TEMPDIR}
+ if [ -n "${DEV}" ]
+ then
+ _sudo rbd device --device-type nbd unmap ${DEV}
+ fi
+
+ for ns in '' ${NS}; do
+ if rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} 2>/dev/null; then
+ for s in 0.5 1 2 4 8 16 32; do
+ sleep $s
+ rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} |
+ grep 'Watchers: none' && break
+ done
+ rbd -p ${POOL} --namespace "${ns}" snap purge ${IMAGE}
+ rbd -p ${POOL} --namespace "${ns}" remove ${IMAGE}
+ fi
+ done
+ rbd namespace remove ${POOL}/${NS}
+
+ # cleanup/reset default pool
+ rbd config global rm global rbd_default_pool
+ ceph osd pool delete ${ANOTHER_POOL} ${ANOTHER_POOL} --yes-i-really-really-mean-it
+}
+
+function expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+function get_pid()
+{
+ local pool=$1
+ local ns=$2
+
+ PID=$(rbd device --device-type nbd --format xml list | $XMLSTARLET sel -t -v \
+ "//devices/device[pool='${pool}'][namespace='${ns}'][image='${IMAGE}'][device='${DEV}']/id")
+ test -n "${PID}" || return 1
+ ps -p ${PID} -C rbd-nbd
+}
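+# get_pid assumes `rbd device list --format xml` output shaped roughly like
+#   <devices><device><id>PID</id><pool>...</pool><namespace>...</namespace>
+#   <image>...</image><device>/dev/nbdX</device></device></devices>
+# and fails unless a matching rbd-nbd process is alive.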
+
+unmap_device()
+{
+ local args=$1
+ local pid=$2
+
+ _sudo rbd device --device-type nbd unmap ${args}
+ rbd device --device-type nbd list | expect_false grep "^${pid}\\b" || return 1
+ ps -C rbd-nbd | expect_false grep "^ *${pid}\\b" || return 1
+
+ # workaround possible race between unmap and following map
+ sleep 0.5
+}
+
+#
+# main
+#
+
+setup
+
+# exit status test
+expect_false rbd-nbd
+expect_false rbd-nbd INVALIDCMD
+if [ `id -u` -ne 0 ]
+then
+ expect_false rbd device --device-type nbd map ${IMAGE}
+fi
+expect_false _sudo rbd device --device-type nbd map INVALIDIMAGE
+expect_false _sudo rbd-nbd --device INVALIDDEV map ${IMAGE}
+
+# list format test
+expect_false rbd device --device-type nbd --format INVALID list
+rbd device --device-type nbd --format json --pretty-format list
+rbd device --device-type nbd --format xml list
+
+# map test using the first unused device
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+# map test specifying the device
+expect_false _sudo rbd-nbd --device ${DEV} map ${POOL}/${IMAGE}
+dev1=${DEV}
+unmap_device ${DEV} ${PID}
+DEV=
+# XXX: race possible when the device is reused by another process
+DEV=`_sudo rbd-nbd --device ${dev1} map ${POOL}/${IMAGE}`
+[ "${DEV}" = "${dev1}" ]
+rbd device --device-type nbd list | grep "${IMAGE}"
+get_pid ${POOL}
+
+# read test
+[ "`dd if=${DATA} bs=1M | md5sum`" = "`_sudo dd if=${DEV} bs=1M | md5sum`" ]
+
+# write test
+dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+[ "`dd if=${DATA} bs=1M | md5sum`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5sum`" ]
+unmap_device ${DEV} ${PID}
+
+# notrim test
+DEV=`_sudo rbd device --device-type nbd --options notrim map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+# discard should fail since the notrim option was used at map time
+expect_false _sudo blkdiscard ${DEV}
+sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+unmap_device ${DEV} ${PID}
+
+# trim test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+# discard should be honored since trim is enabled by default at map time
+_sudo blkdiscard ${DEV}
+sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -lt "${provisioned}" ]
+
+# resize test
+devname=$(basename ${DEV})
+blocks=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks}"
+rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
+rbd info ${POOL}/${IMAGE}
+blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks2}"
+test ${blocks2} -eq $((blocks * 2))
+rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
+blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks2}"
+test ${blocks2} -eq ${blocks}
+
+# read-only option test
+unmap_device ${DEV} ${PID}
+DEV=`_sudo rbd --device-type nbd map --read-only ${POOL}/${IMAGE}`
+PID=$(rbd device --device-type nbd list | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
+ '$2 == pool && $3 == img && $5 == dev {print $1}')
+test -n "${PID}"
+ps -p ${PID} -C rbd-nbd
+
+_sudo dd if=${DEV} of=/dev/null bs=1M
+expect_false _sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+unmap_device ${DEV} ${PID}
+
+# exclusive option test
+DEV=`_sudo rbd --device-type nbd map --exclusive ${POOL}/${IMAGE}`
+get_pid ${POOL}
+
+_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+expect_false timeout 10 \
+ rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
+unmap_device ${DEV} ${PID}
+DEV=
+rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
+
+# unmap by image name test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+unmap_device ${IMAGE} ${PID}
+DEV=
+
+# map/unmap snap test
+rbd snap create ${POOL}/${IMAGE}@snap
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}@snap`
+get_pid ${POOL}
+unmap_device "${IMAGE}@snap" ${PID}
+DEV=
+
+# map/unmap snap test with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
+get_pid ${POOL}
+unmap_device "--snap-id ${SNAPID} ${IMAGE}" ${PID}
+DEV=
+
+# map/unmap namespace test
+rbd snap create ${POOL}/${NS}/${IMAGE}@snap
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}@snap`
+get_pid ${POOL} ${NS}
+unmap_device "${POOL}/${NS}/${IMAGE}@snap" ${PID}
+DEV=
+
+# map/unmap namespace test with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${NS}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device "--snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}" ${PID}
+DEV=
+
+# map/unmap namespace using options test
+DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE}" ${PID}
+DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap`
+get_pid ${POOL} ${NS}
+unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap" ${PID}
+DEV=
+
+# unmap by image name test 2
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+pid=$PID
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device ${POOL}/${NS}/${IMAGE} ${PID}
+DEV=
+unmap_device ${POOL}/${IMAGE} ${pid}
+
+# map/unmap test with just image name and expect image to come from default pool
+if [ "${POOL}" = "rbd" ];then
+ DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
+ get_pid ${POOL}
+ unmap_device ${IMAGE} ${PID}
+ DEV=
+fi
+
+# map/unmap test with just image name after changing default pool
+rbd config global set global rbd_default_pool ${ANOTHER_POOL}
+rbd create --size 10M ${IMAGE}
+DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
+get_pid ${ANOTHER_POOL}
+unmap_device ${IMAGE} ${PID}
+DEV=
+
+# reset
+rbd config global rm global rbd_default_pool
+
+# auto unmap test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+_sudo kill ${PID}
+for i in `seq 10`; do
+ rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}" && break
+ sleep 1
+done
+rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}"
+
+# quiesce test
+QUIESCE_HOOK=${TEMPDIR}/quiesce.sh
+DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} ${POOL}/${IMAGE}`
+get_pid ${POOL}
+
+# test it fails if the hook does not exist
+test ! -e ${QUIESCE_HOOK}
+expect_false rbd snap create ${POOL}/${IMAGE}@quiesce1
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test the hook is executed
+touch ${QUIESCE_HOOK}
+chmod +x ${QUIESCE_HOOK}
+cat > ${QUIESCE_HOOK} <<EOF
+#!/bin/sh
+echo "test the hook is executed" >&2
+echo \$1 > ${TEMPDIR}/\$2
+EOF
+rbd snap create ${POOL}/${IMAGE}@quiesce1
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+test "$(cat ${TEMPDIR}/quiesce)" = ${DEV}
+test "$(cat ${TEMPDIR}/unquiesce)" = ${DEV}
+
+# test snap create fails if the hook fails
+touch ${QUIESCE_HOOK}
+chmod +x ${QUIESCE_HOOK}
+cat > ${QUIESCE_HOOK} <<EOF
+#/bin/sh
+echo "test snap create fails if the hook fails" >&2
+exit 22
+EOF
+expect_false rbd snap create ${POOL}/${IMAGE}@quiesce2
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test the hook is slow
+cat > ${QUIESCE_HOOK} <<EOF
+#!/bin/sh
+echo "test the hook is slow" >&2
+sleep 7
+EOF
+rbd snap create ${POOL}/${IMAGE}@quiesce2
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test rbd-nbd_quiesce hook that comes with distribution
+unmap_device ${DEV} ${PID}
+LOG_FILE=${TEMPDIR}/rbd-nbd.log
+if [ -n "${CEPH_SRC}" ]; then
+ QUIESCE_HOOK=${CEPH_SRC}/tools/rbd_nbd/rbd-nbd_quiesce
+ DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} \
+ ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
+else
+ DEV=`_sudo rbd device --device-type nbd map --quiesce ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
+fi
+get_pid ${POOL}
+_sudo mkfs ${DEV}
+mkdir ${TEMPDIR}/mnt
+_sudo mount ${DEV} ${TEMPDIR}/mnt
+rbd snap create ${POOL}/${IMAGE}@quiesce3
+_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test bs=1M count=1 oflag=direct
+_sudo umount ${TEMPDIR}/mnt
+unmap_device ${DEV} ${PID}
+DEV=
+cat ${LOG_FILE}
+expect_false grep 'quiesce failed' ${LOG_FILE}
+
+# test detach/attach
+OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map ${POOL}/${IMAGE}`
+read DEV COOKIE <<< "${OUT}"
+get_pid ${POOL}
+_sudo mount ${DEV} ${TEMPDIR}/mnt
+_sudo rbd device detach ${POOL}/${IMAGE} --device-type nbd
+expect_false get_pid ${POOL}
+expect_false _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+_sudo rbd device detach ${DEV} --device-type nbd
+expect_false get_pid ${POOL}
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+ls ${TEMPDIR}/mnt/
+dd if=${TEMPDIR}/mnt/test of=/dev/null bs=1M count=1
+_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test1 bs=1M count=1 oflag=direct
+_sudo umount ${TEMPDIR}/mnt
+unmap_device ${DEV} ${PID}
+# if kernel supports cookies
+if [ -n "${COOKIE}" ]; then
+ OUT=`_sudo rbd device --device-type nbd --show-cookie --cookie "abc de" --options try-netlink map ${POOL}/${IMAGE}`
+ read DEV ANOTHER_COOKIE <<< "${OUT}"
+ get_pid ${POOL}
+ test "${ANOTHER_COOKIE}" = "abc de"
+ unmap_device ${DEV} ${PID}
+fi
+DEV=
+
+# test detach/attach with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
+read DEV COOKIE <<< "${OUT}"
+get_pid ${POOL}
+_sudo rbd device detach ${POOL}/${IMAGE} --snap-id ${SNAPID} --device-type nbd
+expect_false get_pid ${POOL}
+expect_false _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+_sudo rbd device detach ${DEV} --device-type nbd
+expect_false get_pid ${POOL}
+DEV=
+
+# test discard granularity with journaling
+rbd config image set ${POOL}/${IMAGE} rbd_discard_granularity_bytes 4096
+rbd feature enable ${POOL}/${IMAGE} journaling
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+# Since a discard will now be pruned to whole blocks only (0..4095,
+# 4096..8191), test all the cases around those alignments. 512 is the
+# smallest block size blkdiscard allows, so the test checks 512 bytes
+# before each alignment, exactly on it, and 512 bytes after.
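+# For example, the 4096+512 byte discard below spans 0..4607 and should be
+# pruned down to the single whole block 0..4095, while the 4096-512 byte
+# one covers no whole block and should be dropped entirely.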
+_sudo blkdiscard --offset 0 --length $((4096-512)) ${DEV}
+_sudo blkdiscard --offset 0 --length 4096 ${DEV}
+_sudo blkdiscard --offset 0 --length $((4096+512)) ${DEV}
+_sudo blkdiscard --offset 512 --length $((8192-1024)) ${DEV}
+_sudo blkdiscard --offset 512 --length $((8192-512)) ${DEV}
+_sudo blkdiscard --offset 512 --length 8192 ${DEV}
+# wait for commit log to be empty, 10 seconds should be well enough
+tries=0
+queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
+while [ ${tries} -lt 10 ] && [ ${queue_length} -gt 0 ]; do
+ rbd journal inspect --pool ${POOL} --image ${IMAGE} --verbose
+ sleep 1
+ queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
+ tries=$((tries+1))
+done
+[ ${queue_length} -eq 0 ]
+unmap_device ${DEV} ${PID}
+DEV=
+rbd feature disable ${POOL}/${IMAGE} journaling
+rbd config image rm ${POOL}/${IMAGE} rbd_discard_granularity_bytes
+
+# test that disabling a feature, when the op gets proxied to rbd-nbd
+# (arranged here by issuing blkdiscard before "rbd feature disable"),
+# doesn't hang
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+rbd feature enable ${POOL}/${IMAGE} journaling
+_sudo blkdiscard --offset 0 --length 4096 ${DEV}
+rbd feature disable ${POOL}/${IMAGE} journaling
+unmap_device ${DEV} ${PID}
+DEV=
+
+# test that rbd_op_threads setting takes effect
+EXPECTED=`ceph-conf --show-config-value librados_thread_count`
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
+[ ${ACTUAL} -eq ${EXPECTED} ]
+unmap_device ${DEV} ${PID}
+EXPECTED=$((EXPECTED * 3 + 1))
+DEV=`_sudo rbd device --device-type nbd --rbd-op-threads ${EXPECTED} map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
+[ ${ACTUAL} -eq ${EXPECTED} ]
+unmap_device ${DEV} ${PID}
+DEV=
+
+echo OK
diff --git a/qa/workunits/rbd/rbd_groups.sh b/qa/workunits/rbd/rbd_groups.sh
new file mode 100755
index 000000000..a32618484
--- /dev/null
+++ b/qa/workunits/rbd/rbd_groups.sh
@@ -0,0 +1,258 @@
+#!/usr/bin/env bash
+
+set -ex
+
+#
+# rbd_groups.sh - test consistency groups CLI commands
+#
+
+#
+# Functions
+#
+
+create_group()
+{
+ local group_name=$1
+
+ rbd group create $group_name
+}
+
+list_groups()
+{
+ rbd group list
+}
+
+check_group_exists()
+{
+ local group_name=$1
+ list_groups | grep $group_name
+}
+
+remove_group()
+{
+ local group_name=$1
+
+ rbd group remove $group_name
+}
+
+rename_group()
+{
+ local src_name=$1
+ local dest_name=$2
+
+ rbd group rename $src_name $dest_name
+}
+
+check_group_does_not_exist()
+{
+ local group_name=$1
+ for v in $(list_groups); do
+ if [ "$v" == "$group_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+create_image()
+{
+ local image_name=$1
+ rbd create --size 10M $image_name
+}
+
+remove_image()
+{
+ local image_name=$1
+ rbd remove $image_name
+}
+
+add_image_to_group()
+{
+ local image_name=$1
+ local group_name=$2
+ rbd group image add $group_name $image_name
+}
+
+remove_image_from_group()
+{
+ local image_name=$1
+ local group_name=$2
+ rbd group image remove $group_name $image_name
+}
+
+check_image_in_group()
+{
+ local image_name=$1
+ local group_name=$2
+ for v in $(rbd group image list $group_name); do
+ local vtrimmed=${v#*/}
+ if [ "$vtrimmed" = "$image_name" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+check_image_not_in_group()
+{
+ local image_name=$1
+ local group_name=$2
+ for v in $(rbd group image list $group_name); do
+ local vtrimmed=${v#*/}
+ if [ "$vtrimmed" = "$image_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+create_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap create $group_name@$snap_name
+}
+
+create_snapshots()
+{
+ local group_name=$1
+ local snap_name=$2
+ local snap_count=$3
+ for i in `seq 1 $snap_count`; do
+ rbd group snap create $group_name@$snap_name$i
+ done
+}
+
+remove_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap remove $group_name@$snap_name
+}
+
+remove_snapshots()
+{
+ local group_name=$1
+ local snap_name=$2
+ local snap_count=$3
+ for i in `seq 1 $snap_count`; do
+ rbd group snap remove $group_name@$snap_name$i
+ done
+}
+
+rename_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ local new_snap_name=$3
+ rbd group snap rename $group_name@$snap_name $new_snap_name
+}
+
+list_snapshots()
+{
+ local group_name=$1
+ rbd group snap list $group_name
+}
+
+rollback_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap rollback $group_name@$snap_name
+}
+
+check_snapshot_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ list_snapshots $group_name | grep $snap_name
+}
+
+check_snapshots_count_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ local expected_count=$3
+ local actual_count
+ actual_count=$(list_snapshots $group_name | grep -c $snap_name)
+ (( actual_count == expected_count ))
+}
+
+check_snapshot_not_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ for v in $(list_snapshots $group_name | awk '{print $1}'); do
+ if [ "$v" = "$snap_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+echo "TEST: create remove consistency group"
+group="test_consistency_group"
+new_group="test_new_consistency_group"
+create_group $group
+check_group_exists $group
+rename_group $group $new_group
+check_group_exists $new_group
+remove_group $new_group
+check_group_does_not_exist $new_group
+echo "PASSED"
+
+echo "TEST: add remove images to consistency group"
+image="test_image"
+group="test_consistency_group"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+check_image_in_group $image $group
+remove_image_from_group $image $group
+check_image_not_in_group $image $group
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "TEST: create remove snapshots of consistency group"
+image="test_image"
+group="test_consistency_group"
+snap="group_snap"
+new_snap="new_group_snap"
+sec_snap="group_snap2"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+create_snapshot $group $snap
+check_snapshot_in_group $group $snap
+rename_snapshot $group $snap $new_snap
+check_snapshot_not_in_group $group $snap
+create_snapshot $group $sec_snap
+check_snapshot_in_group $group $sec_snap
+rollback_snapshot $group $new_snap
+remove_snapshot $group $new_snap
+check_snapshot_not_in_group $group $new_snap
+remove_snapshot $group $sec_snap
+check_snapshot_not_in_group $group $sec_snap
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "TEST: list snapshots of consistency group"
+image="test_image"
+group="test_consistency_group"
+snap="group_snap"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+create_snapshots $group $snap 10
+check_snapshots_count_in_group $group $snap 10
+remove_snapshots $group $snap 10
+create_snapshots $group $snap 100
+check_snapshots_count_in_group $group $snap 100
+remove_snapshots $group $snap 100
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "OK"
diff --git a/qa/workunits/rbd/rbd_mirror_bootstrap.sh b/qa/workunits/rbd/rbd_mirror_bootstrap.sh
new file mode 100755
index 000000000..6ef06f2b8
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_bootstrap.sh
@@ -0,0 +1,58 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_bootstrap.sh - test peer bootstrap create/import
+#
+
+RBD_MIRROR_MANUAL_PEERS=1
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-1}
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: bootstrap cluster2 from cluster1"
+# create token on cluster1 and import to cluster2
+TOKEN=${TEMPDIR}/peer-token
+TOKEN_2=${TEMPDIR}/peer-token-2
+CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${POOL} > ${TOKEN}
+CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${PARENT_POOL} > ${TOKEN_2}
+cmp ${TOKEN} ${TOKEN_2}
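+# The bootstrap token carries only cluster-level connection details, not the
+# pool, so tokens created against different pools are byte-identical.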
+
+CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-only
+CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-tx
+
+start_mirrors ${CLUSTER1}
+start_mirrors ${CLUSTER2}
+
+testlog "TEST: verify rx-only direction"
+# rx-only peer is added immediately by "rbd mirror pool peer bootstrap import"
+rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-only"'
+# tx-only peer is added asynchronously by mirror_peer_ping class method
+while ! rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers | length > 0'; do
+ sleep 1
+done
+rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "tx-only"'
+
+create_image_and_enable_mirror ${CLUSTER1} ${POOL} image1
+
+wait_for_image_replay_started ${CLUSTER2} ${POOL} image1
+write_image ${CLUSTER1} ${POOL} image1 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} image1
+
+testlog "TEST: verify rx-tx direction"
+# both rx-tx peers are added immediately by "rbd mirror pool peer bootstrap import"
+rbd --cluster ${CLUSTER1} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
+rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
+
+create_image ${CLUSTER1} ${PARENT_POOL} image1
+create_image ${CLUSTER2} ${PARENT_POOL} image2
+
+enable_mirror ${CLUSTER1} ${PARENT_POOL} image1
+enable_mirror ${CLUSTER2} ${PARENT_POOL} image2
+
+wait_for_image_replay_started ${CLUSTER2} ${PARENT_POOL} image1
+write_image ${CLUSTER1} ${PARENT_POOL} image1 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${PARENT_POOL} image1
+
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} image2
+write_image ${CLUSTER2} ${PARENT_POOL} image2 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} image2
diff --git a/qa/workunits/rbd/rbd_mirror_fsx_compare.sh b/qa/workunits/rbd/rbd_mirror_fsx_compare.sh
new file mode 100755
index 000000000..0ba3c97d7
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_fsx_compare.sh
@@ -0,0 +1,38 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_fsx_compare.sh - test rbd-mirror daemon under FSX workload
+#
+# The script is used to compare FSX-generated images between two clusters.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+trap 'cleanup $?' INT TERM EXIT
+
+setup_tempdir
+
+testlog "TEST: wait for all images"
+image_count=$(rbd --cluster ${CLUSTER1} --pool ${POOL} ls | wc -l)
+retrying_seconds=0
+sleep_seconds=10
+while [ ${retrying_seconds} -le 7200 ]; do
+ [ $(rbd --cluster ${CLUSTER2} --pool ${POOL} ls | wc -l) -ge ${image_count} ] && break
+ sleep ${sleep_seconds}
+ retrying_seconds=$(($retrying_seconds+${sleep_seconds}))
+done
+
+testlog "TEST: snapshot all pool images"
+snap_id=`uuidgen`
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ create_snapshot ${CLUSTER1} ${POOL} ${image} ${snap_id}
+done
+
+testlog "TEST: wait for snapshots"
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ wait_for_snap_present ${CLUSTER2} ${POOL} ${image} ${snap_id}
+done
+
+testlog "TEST: compare image snapshots"
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ compare_image_snapshots ${POOL} ${image}
+done
diff --git a/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh b/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
new file mode 100755
index 000000000..d988987ba
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_fsx_prepare.sh - test rbd-mirror daemon under FSX workload
+#
+# The script sets up the clusters for the FSX workload whose results
+# rbd_mirror_fsx_compare.sh later verifies.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
diff --git a/qa/workunits/rbd/rbd_mirror_ha.sh b/qa/workunits/rbd/rbd_mirror_ha.sh
new file mode 100755
index 000000000..37739a83d
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_ha.sh
@@ -0,0 +1,210 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_ha.sh - test rbd-mirror daemons in HA mode
+#
+
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-7}
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+is_leader()
+{
+ local instance=$1
+ local pool=$2
+
+ test -n "${pool}" || pool=${POOL}
+
+ admin_daemon "${CLUSTER1}:${instance}" \
+ rbd mirror status ${pool} ${CLUSTER2}${PEER_CLUSTER_SUFFIX} |
+ grep '"leader": true'
+}
+
+wait_for_leader()
+{
+ local s instance
+
+ for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64; do
+ sleep $s
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ is_leader ${instance} || continue
+ LEADER=${instance}
+ return 0
+ done
+ done
+
+ LEADER=
+ return 1
+}
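+# The sleep sequence above allows roughly three minutes in total for a new
+# leader to be elected before giving up.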
+
+release_leader()
+{
+ local pool=$1
+ local cmd="rbd mirror leader release"
+
+ test -n "${pool}" && cmd="${cmd} ${pool} ${CLUSTER2}"
+
+ admin_daemon "${CLUSTER1}:${LEADER}" ${cmd}
+}
+
+wait_for_leader_released()
+{
+ local i
+
+ test -n "${LEADER}"
+ for i in `seq 10`; do
+ is_leader ${LEADER} || return 0
+ sleep 1
+ done
+
+ return 1
+}
+
+test_replay()
+{
+ local image
+
+ for image; do
+ wait_for_image_replay_started ${CLUSTER1}:${LEADER} ${POOL} ${image}
+ write_image ${CLUSTER2} ${POOL} ${image} 100
+ wait_for_replay_complete ${CLUSTER1}:${LEADER} ${CLUSTER2} ${POOL} \
+ ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' \
+ 'primary_position' \
+ "${MIRROR_USER_ID_PREFIX}${LEADER} on $(hostname -s)"
+ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} \
+ 'down+unknown'
+ fi
+ compare_images ${POOL} ${image}
+ done
+}
+
+testlog "TEST: start first daemon instance and test replay"
+start_mirror ${CLUSTER1}:0
+image1=test1
+create_image ${CLUSTER2} ${POOL} ${image1}
+LEADER=0
+test_replay ${image1}
+
+testlog "TEST: release leader and wait it is reacquired"
+is_leader 0 ${POOL}
+is_leader 0 ${PARENT_POOL}
+release_leader ${POOL}
+wait_for_leader_released
+is_leader 0 ${PARENT_POOL}
+wait_for_leader
+release_leader
+wait_for_leader_released
+expect_failure "" is_leader 0 ${PARENT_POOL}
+wait_for_leader
+
+testlog "TEST: start second daemon instance and test replay"
+start_mirror ${CLUSTER1}:1
+image2=test2
+create_image ${CLUSTER2} ${POOL} ${image2}
+test_replay ${image1} ${image2}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+is_leader 0 ${POOL}
+is_leader 0 ${PARENT_POOL}
+release_leader ${POOL}
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+release_leader
+wait_for_leader_released
+wait_for_leader
+test "${LEADER}" = 0
+
+testlog "TEST: stop first daemon instance and test replay"
+stop_mirror ${CLUSTER1}:0
+image3=test3
+create_image ${CLUSTER2} ${POOL} ${image3}
+LEADER=1
+test_replay ${image1} ${image2} ${image3}
+
+testlog "TEST: start first daemon instance and test replay"
+start_mirror ${CLUSTER1}:0
+image4=test4
+create_image ${CLUSTER2} ${POOL} ${image4}
+test_replay ${image3} ${image4}
+
+testlog "TEST: crash leader and test replay"
+stop_mirror ${CLUSTER1}:1 -KILL
+image5=test5
+create_image ${CLUSTER2} ${POOL} ${image5}
+LEADER=0
+test_replay ${image1} ${image4} ${image5}
+
+testlog "TEST: start crashed leader and test replay"
+start_mirror ${CLUSTER1}:1
+image6=test6
+create_image ${CLUSTER2} ${POOL} ${image6}
+test_replay ${image1} ${image6}
+
+testlog "TEST: start yet another daemon instance and test replay"
+start_mirror ${CLUSTER1}:2
+image7=test7
+create_image ${CLUSTER2} ${POOL} ${image7}
+test_replay ${image1} ${image7}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+is_leader 0
+release_leader
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+
+testlog "TEST: stop leader and test replay"
+stop_mirror ${CLUSTER1}:${LEADER}
+image8=test8
+create_image ${CLUSTER2} ${POOL} ${image8}
+prev_leader=${LEADER}
+wait_for_leader
+test_replay ${image1} ${image8}
+
+testlog "TEST: start previous leader and test replay"
+start_mirror ${CLUSTER1}:${prev_leader}
+image9=test9
+create_image ${CLUSTER2} ${POOL} ${image9}
+test_replay ${image1} ${image9}
+
+testlog "TEST: crash leader and test replay"
+stop_mirror ${CLUSTER1}:${LEADER} -KILL
+image10=test10
+create_image ${CLUSTER2} ${POOL} ${image10}
+prev_leader=${LEADER}
+wait_for_leader
+test_replay ${image1} ${image10}
+
+testlog "TEST: start previous leader and test replay"
+start_mirror ${CLUSTER1}:${prev_leader}
+image11=test11
+create_image ${CLUSTER2} ${POOL} ${image11}
+test_replay ${image1} ${image11}
+
+testlog "TEST: start some more daemon instances and test replay"
+start_mirror ${CLUSTER1}:3
+start_mirror ${CLUSTER1}:4
+start_mirror ${CLUSTER1}:5
+start_mirror ${CLUSTER1}:6
+image13=test13
+create_image ${CLUSTER2} ${POOL} ${image13}
+test_replay ${image1} ${image13}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+release_leader
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+
+testlog "TEST: in loop: stop leader and test replay"
+for i in 0 1 2 3 4 5; do
+ stop_mirror ${CLUSTER1}:${LEADER}
+ wait_for_leader
+ test_replay ${image1}
+done
+
+stop_mirror ${CLUSTER1}:${LEADER}
diff --git a/qa/workunits/rbd/rbd_mirror_helpers.sh b/qa/workunits/rbd/rbd_mirror_helpers.sh
new file mode 100755
index 000000000..f4961b925
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_helpers.sh
@@ -0,0 +1,1488 @@
+#!/bin/sh
+#
+# rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
+#
+# The script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory (used for cluster configs, daemon
+# logs, admin sockets, and temporary files), and launches the rbd-mirror
+# daemons.
+#
+# There are several env variables useful when troubleshooting a test failure:
+#
+# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
+# destroy the clusters and remove the temp directory)
+# on exit, so it is possible to check the test state
+# after failure.
+# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
+# (should not exist) instead of running mktemp(1).
+# RBD_MIRROR_ARGS - use this to pass additional arguments to started
+# rbd-mirror daemons.
+# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
+# when starting clusters.
+# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
+# RBD_MIRROR_CONFIG_KEY - if not empty, use config-key for remote cluster
+# secrets
+# The cleanup can be done as a separate step, by running the script with the
+# `cleanup' argument and RBD_MIRROR_TEMDIR pointing at the test directory.
+#
+# Note that, like other workunit tests, rbd_mirror_journal.sh expects to find
+# the ceph binaries in PATH.
+#
+# Thus a typical troubleshooting session:
+#
+# From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
+# TEMPDIR pointing to a known location:
+#
+# cd $CEPH_SRC_PATH
+# PATH=$CEPH_SRC_PATH:$PATH
+# RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
+# ../qa/workunits/rbd/rbd_mirror_journal.sh
+#
+# After the test failure cd to TEMPDIR and check the current state:
+#
+# cd /tmp/tmp.rbd_mirror
+# ls
+# less rbd-mirror.cluster1_daemon.$pid.log
+# ceph --cluster cluster1 -s
+# ceph --cluster cluster2 -s
+# rbd --cluster cluster2 -p mirror ls
+# rbd --cluster cluster2 -p mirror journal status --image test
+# ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
+# ...
+#
+# Also you can execute commands (functions) from the script:
+#
+# cd $CEPH_SRC_PATH
+# export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
+# ../qa/workunits/rbd/rbd_mirror_journal.sh status
+# ../qa/workunits/rbd/rbd_mirror_journal.sh stop_mirror cluster1
+# ../qa/workunits/rbd/rbd_mirror_journal.sh start_mirror cluster2
+# ../qa/workunits/rbd/rbd_mirror_journal.sh flush cluster2
+# ...
+#
+# Eventually, run the cleanup:
+#
+# cd $CEPH_SRC_PATH
+# RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
+# ../qa/workunits/rbd/rbd_mirror_journal.sh cleanup
+#
+
+if type xmlstarlet > /dev/null 2>&1; then
+ XMLSTARLET=xmlstarlet
+elif type xml > /dev/null 2>&1; then
+ XMLSTARLET=xml
+else
+ echo "Missing xmlstarlet binary!"
+ exit 1
+fi
+
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-2}
+
+CLUSTER1=cluster1
+CLUSTER2=cluster2
+PEER_CLUSTER_SUFFIX=
+POOL=mirror
+PARENT_POOL=mirror_parent
+NS1=ns1
+NS2=ns2
+TEMPDIR=
+CEPH_ID=${CEPH_ID:-mirror}
+RBD_IMAGE_FEATURES=${RBD_IMAGE_FEATURES:-layering,exclusive-lock,journaling}
+MIRROR_USER_ID_PREFIX=${MIRROR_USER_ID_PREFIX:-${CEPH_ID}.}
+MIRROR_POOL_MODE=${MIRROR_POOL_MODE:-pool}
+MIRROR_IMAGE_MODE=${MIRROR_IMAGE_MODE:-journal}
+
+export CEPH_ARGS="--id ${CEPH_ID}"
+
+LAST_MIRROR_INSTANCE=$((${RBD_MIRROR_INSTANCES} - 1))
+
+CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
+CEPH_BIN=.
+CEPH_SRC=.
+if [ -e CMakeCache.txt ]; then
+ CEPH_SRC=${CEPH_ROOT}
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=./bin
+
+ # needed for ceph CLI under cmake
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+fi
+
+# These vars facilitate running this script in an environment with
+# ceph installed from packages, like teuthology. These are not defined
+# by default.
+#
+# RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
+# RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
+# running as ceph client $CEPH_ID. If empty,
+# this script will start and stop rbd-mirror
+
+#
+# Functions
+#
+
+# Parse a value in format cluster[:instance] and set cluster and instance vars.
+set_cluster_instance()
+{
+ local val=$1
+ local cluster_var_name=$2
+ local instance_var_name=$3
+
+ cluster=${val%:*}
+ instance=${val##*:}
+
+ if [ "${instance}" = "${val}" ]; then
+ # instance was not specified, use default
+ instance=0
+ fi
+
+ eval ${cluster_var_name}=${cluster}
+ eval ${instance_var_name}=${instance}
+}
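+# For example (illustrative):
+#   set_cluster_instance "cluster1:2" c i   # sets c=cluster1, i=2
+#   set_cluster_instance "cluster1" c i     # sets c=cluster1, i=0 (default)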
+
+daemon_asok_file()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local instance
+
+ set_cluster_instance "${local_cluster}" local_cluster instance
+
+ echo $(ceph-conf --cluster $local_cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'admin socket')
+}
+
+daemon_pid_file()
+{
+ local cluster=$1
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ echo $(ceph-conf --cluster $cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'pid file')
+}
+
+testlog()
+{
+ echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
+}
+
+expect_failure()
+{
+ local expected="$1" ; shift
+ local out=${TEMPDIR}/expect_failure.out
+
+ if "$@" > ${out} 2>&1 ; then
+ cat ${out} >&2
+ return 1
+ fi
+
+ if [ -z "${expected}" ]; then
+ return 0
+ fi
+
+ if ! grep -q "${expected}" ${out} ; then
+ cat ${out} >&2
+ return 1
+ fi
+
+ return 0
+}
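+# Example usage (illustrative; the exact error text is an assumption about
+# rbd's output, not something this file defines):
+#   expect_failure "No such file" rbd --cluster ${CLUSTER1} info ${POOL}/missing
+# succeeds only if the command fails and its output matches the pattern.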
+
+mkfname()
+{
+ echo "$@" | sed -e 's|[/ ]|_|g'
+}
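+# e.g. mkfname "${CLUSTER2}-${POOL}/${NS1}-img.status" yields
+# "cluster2-mirror_ns1-img.status" -- slashes and spaces become underscores,
+# so pool/namespace specs are safe to embed in ${TEMPDIR} file names.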
+
+create_users()
+{
+ local cluster=$1
+
+ CEPH_ARGS='' ceph --cluster "${cluster}" \
+ auth get-or-create client.${CEPH_ID} \
+ mon 'profile rbd' osd 'profile rbd' mgr 'profile rbd' >> \
+ ${CEPH_ROOT}/run/${cluster}/keyring
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ CEPH_ARGS='' ceph --cluster "${cluster}" \
+ auth get-or-create client.${MIRROR_USER_ID_PREFIX}${instance} \
+ mon 'profile rbd-mirror' osd 'profile rbd' mgr 'profile rbd' >> \
+ ${CEPH_ROOT}/run/${cluster}/keyring
+ done
+}
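+# With the defaults above this creates "client.mirror" (used by the test's
+# rbd and ceph commands via CEPH_ARGS) plus one "client.mirror.<instance>"
+# user per rbd-mirror daemon, all appended to the cluster's keyring.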
+
+setup_cluster()
+{
+ local cluster=$1
+
+ CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${cluster} -n ${RBD_MIRROR_VARGS}
+
+ cd ${CEPH_ROOT}
+ rm -f ${TEMPDIR}/${cluster}.conf
+ ln -s $(readlink -f run/${cluster}/ceph.conf) \
+ ${TEMPDIR}/${cluster}.conf
+
+ cd ${TEMPDIR}
+ create_users "${cluster}"
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ cat<<EOF >> ${TEMPDIR}/${cluster}.conf
+[client.${MIRROR_USER_ID_PREFIX}${instance}]
+ admin socket = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.asok
+ pid file = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.pid
+ log file = ${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log
+EOF
+ done
+}
+
+peer_add()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local client_cluster=$1 ; shift
+ local remote_cluster="${client_cluster##*@}"
+
+ local uuid_var_name
+ if [ -n "$1" ]; then
+ uuid_var_name=$1 ; shift
+ fi
+
+ local error_code
+ local peer_uuid
+
+ for s in 1 2 4 8 16 32; do
+ set +e
+ peer_uuid=$(rbd --cluster ${cluster} mirror pool peer add \
+ ${pool} ${client_cluster} $@)
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 17 ]; then
+            # EEXIST (17): raced with a remote heartbeat ping -- remove and retry
+ sleep $s
+ peer_uuid=$(rbd mirror pool info --cluster ${cluster} --pool ${pool} --format xml | \
+                ${XMLSTARLET} sel -t -v "//peers/peer[site_name='${remote_cluster}']/uuid")
+
+ CEPH_ARGS='' rbd --cluster ${cluster} --pool ${pool} mirror pool peer remove ${peer_uuid}
+ else
+ test $error_code -eq 0
+ if [ -n "$uuid_var_name" ]; then
+ eval ${uuid_var_name}=${peer_uuid}
+ fi
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+setup_pools()
+{
+ local cluster=$1
+ local remote_cluster=$2
+ local mon_map_file
+ local mon_addr
+ local admin_key_file
+ local uuid
+
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${POOL} 64 64
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${PARENT_POOL} 64 64
+
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${POOL}
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${PARENT_POOL}
+
+ if [ -n "${RBD_MIRROR_CONFIG_KEY}" ]; then
+ PEER_CLUSTER_SUFFIX=-DNE
+ fi
+
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool enable \
+ --site-name ${cluster}${PEER_CLUSTER_SUFFIX} ${POOL} ${MIRROR_POOL_MODE}
+ rbd --cluster ${cluster} mirror pool enable ${PARENT_POOL} image
+
+ rbd --cluster ${cluster} namespace create ${POOL}/${NS1}
+ rbd --cluster ${cluster} namespace create ${POOL}/${NS2}
+
+ rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS1} ${MIRROR_POOL_MODE}
+ rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS2} image
+
+ if [ -z ${RBD_MIRROR_MANUAL_PEERS} ]; then
+ if [ -z ${RBD_MIRROR_CONFIG_KEY} ]; then
+ peer_add ${cluster} ${POOL} ${remote_cluster}
+ peer_add ${cluster} ${PARENT_POOL} ${remote_cluster}
+ else
+ mon_map_file=${TEMPDIR}/${remote_cluster}.monmap
+ CEPH_ARGS='' ceph --cluster ${remote_cluster} mon getmap > ${mon_map_file}
+ mon_addr=$(monmaptool --print ${mon_map_file} | grep -E 'mon\.' |
+ head -n 1 | sed -E 's/^[0-9]+: ([^ ]+).+$/\1/' | sed -E 's/\/[0-9]+//g')
+
+ admin_key_file=${TEMPDIR}/${remote_cluster}.client.${CEPH_ID}.key
+ CEPH_ARGS='' ceph --cluster ${remote_cluster} auth get-key client.${CEPH_ID} > ${admin_key_file}
+
+ CEPH_ARGS='' peer_add ${cluster} ${POOL} \
+ client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} '' \
+ --remote-mon-host "${mon_addr}" --remote-key-file ${admin_key_file}
+
+ peer_add ${cluster} ${PARENT_POOL} client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} uuid
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} mon-host ${mon_addr}
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} key-file ${admin_key_file}
+ fi
+ fi
+}
+
+setup_tempdir()
+{
+ if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
+ test -d "${RBD_MIRROR_TEMDIR}" ||
+ mkdir "${RBD_MIRROR_TEMDIR}"
+ TEMPDIR="${RBD_MIRROR_TEMDIR}"
+ cd ${TEMPDIR}
+ else
+ TEMPDIR=`mktemp -d`
+ fi
+}
+
+setup()
+{
+ local c
+ trap 'cleanup $?' INT TERM EXIT
+
+ setup_tempdir
+ if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
+ setup_cluster "${CLUSTER1}"
+ setup_cluster "${CLUSTER2}"
+ fi
+
+ setup_pools "${CLUSTER1}" "${CLUSTER2}"
+ setup_pools "${CLUSTER2}" "${CLUSTER1}"
+
+ if [ -n "${RBD_MIRROR_MIN_COMPAT_CLIENT}" ]; then
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd \
+ set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd \
+ set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
+ fi
+}
+
+cleanup()
+{
+ local error_code=$1
+
+ set +e
+
+ if [ "${error_code}" -ne 0 ]; then
+ status
+ fi
+
+ if [ -z "${RBD_MIRROR_NOCLEANUP}" ]; then
+ local cluster instance
+
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
+
+ for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
+ stop_mirrors "${cluster}"
+ done
+
+ if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
+ cd ${CEPH_ROOT}
+ CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
+ CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
+ fi
+ test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" || rm -Rf ${TEMPDIR}
+ fi
+
+ if [ "${error_code}" -eq 0 ]; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ exit ${error_code}
+}
+
+start_mirror()
+{
+ local cluster=$1
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
+
+ rbd-mirror \
+ --cluster ${cluster} \
+ --id ${MIRROR_USER_ID_PREFIX}${instance} \
+ --rbd-mirror-delete-retry-interval=5 \
+ --rbd-mirror-image-state-check-interval=5 \
+ --rbd-mirror-journal-poll-age=1 \
+ --rbd-mirror-pool-replayers-refresh-interval=5 \
+ --debug-rbd=30 --debug-journaler=30 \
+ --debug-rbd_mirror=30 \
+ --daemonize=true \
+ ${RBD_MIRROR_ARGS}
+}
+
+start_mirrors()
+{
+ local cluster=$1
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ start_mirror "${cluster}:${instance}"
+ done
+}
+
+stop_mirror()
+{
+ local cluster=$1
+ local sig=$2
+
+ test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
+
+ local pid
+ pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
+ if [ -n "${pid}" ]
+ then
+ kill ${sig} ${pid}
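+        # wait for the pid to disappear from ps: the awk exits non-zero while
+        # the pid is still listed, so "&& break" fires once the process is gone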
+ for s in 1 2 4 8 16 32; do
+ sleep $s
+ ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
+ done
+ ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
+ fi
+ rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
+ rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
+ rm -f $(daemon_pid_file "${cluster}")
+}
+
+stop_mirrors()
+{
+ local cluster=$1
+ local sig=$2
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ stop_mirror "${cluster}:${instance}" "${sig}"
+ done
+}
+
+admin_daemon()
+{
+ local cluster=$1 ; shift
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ local asok_file=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
+ test -S "${asok_file}"
+
+ ceph --admin-daemon ${asok_file} $@
+}
+
+admin_daemons()
+{
+ local cluster_instance=$1 ; shift
+ local cluster="${cluster_instance%:*}"
+ local instance="${cluster_instance##*:}"
+ local loop_instance
+
+ for s in 0 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ if [ "${instance}" != "${cluster_instance}" ]; then
+ admin_daemon "${cluster}:${instance}" $@ && return 0
+ else
+ for loop_instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ admin_daemon "${cluster}:${loop_instance}" $@ && return 0
+ done
+ fi
+ done
+ return 1
+}
+
+all_admin_daemons()
+{
+ local cluster=$1 ; shift
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ admin_daemon "${cluster}:${instance}" $@
+ done
+}
+
+status()
+{
+ local cluster daemon image_pool image_ns image
+
+ for cluster in ${CLUSTER1} ${CLUSTER2}
+ do
+ echo "${cluster} status"
+ CEPH_ARGS='' ceph --cluster ${cluster} -s
+ CEPH_ARGS='' ceph --cluster ${cluster} service dump
+ CEPH_ARGS='' ceph --cluster ${cluster} service status
+ echo
+
+ for image_pool in ${POOL} ${PARENT_POOL}
+ do
+ for image_ns in "" "${NS1}" "${NS2}"
+ do
+ echo "${cluster} ${image_pool} ${image_ns} images"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls -l
+ echo
+
+                echo "${cluster} ${image_pool} ${image_ns} mirror pool info"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool info
+ echo
+
+                echo "${cluster} ${image_pool} ${image_ns} mirror pool status"
+ CEPH_ARGS='' rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool status --verbose
+ echo
+
+ for image in `rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls 2>/dev/null`
+ do
+ echo "image ${image} info"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" info ${image}
+ echo
+ echo "image ${image} journal status"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" journal status --image ${image}
+ echo
+ echo "image ${image} snapshots"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" snap ls --all ${image}
+ echo
+ done
+
+ echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals"
+ rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirroring
+ echo "${cluster} ${image_pool} ${image_ns} rbd_mirror_leader omap vals"
+ rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirror_leader
+ echo
+ done
+ done
+ done
+
+    local ret=0
+
+ for cluster in "${CLUSTER1}" "${CLUSTER2}"
+ do
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ local pid_file=$(daemon_pid_file ${cluster}:${instance})
+ if [ ! -e ${pid_file} ]
+ then
+ echo "${cluster} rbd-mirror not running or unknown" \
+                     "(${pid_file} does not exist)"
+ continue
+ fi
+
+ local pid
+ pid=$(cat ${pid_file} 2>/dev/null) || :
+ if [ -z "${pid}" ]
+ then
+ echo "${cluster} rbd-mirror not running or unknown" \
+ "(can't find pid using ${pid_file})"
+ ret=1
+ continue
+ fi
+
+            echo "${cluster} rbd-mirror process in ps output:"
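+            # the awk exits non-zero when the pid is found, so entering the
+            # "then" branch below means the daemon process is missing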
+ if ps auxww |
+ awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
+ then
+ echo
+ echo "${cluster} rbd-mirror not running" \
+ "(can't find pid $pid in ps output)"
+ ret=1
+ continue
+ fi
+ echo
+
+ local asok_file=$(daemon_asok_file ${cluster}:${instance} ${cluster})
+ if [ ! -S "${asok_file}" ]
+ then
+                echo "${cluster} rbd-mirror asok is unknown (${asok_file} does not exist)"
+ ret=1
+ continue
+ fi
+
+ echo "${cluster} rbd-mirror status"
+ ceph --admin-daemon ${asok_file} rbd mirror status
+ echo
+ done
+ done
+
+ return ${ret}
+}
+
+flush()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local cmd="rbd mirror flush"
+
+ if [ -n "${image}" ]
+ then
+ cmd="${cmd} ${pool}/${image}"
+ fi
+
+ admin_daemons "${cluster}" ${cmd}
+}
+
+test_image_replay_state()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local test_state=$4
+ local status_result
+ local current_state=stopped
+
+ status_result=$(admin_daemons "${cluster}" rbd mirror status ${pool}/${image} | grep -i 'state') || return 1
+ echo "${status_result}" | grep -i 'Replaying' && current_state=started
+ test "${test_state}" = "${current_state}"
+}
+
+wait_for_image_replay_state()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state=$4
+ local s
+
+ # TODO: add a way to force rbd-mirror to update replayers
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}" && return 0
+ done
+ return 1
+}
+
+wait_for_image_replay_started()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ wait_for_image_replay_state "${cluster}" "${pool}" "${image}" started
+}
+
+wait_for_image_replay_stopped()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ wait_for_image_replay_state "${cluster}" "${pool}" "${image}" stopped
+}
+
+get_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local id_regexp=$4
+
+ # Parse line like below, looking for the first position
+ # [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]
+
+ local status_log=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.status)
+ rbd --cluster ${cluster} journal status --image ${pool}/${image} |
+ tee ${status_log} >&2
+ sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
+ ${status_log}
+}
+
+get_master_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ get_journal_position "${cluster}" "${pool}" "${image}" ''
+}
+
+get_mirror_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ get_journal_position "${cluster}" "${pool}" "${image}" '..*'
+}
+
+wait_for_journal_replay_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+ local s master_pos mirror_pos last_mirror_pos
+ local master_tag master_entry mirror_tag mirror_entry
+
+ while true; do
+ for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
+ sleep ${s}
+ flush "${local_cluster}" "${pool}" "${image}"
+ master_pos=$(get_master_journal_position "${cluster}" "${pool}" "${image}")
+ mirror_pos=$(get_mirror_journal_position "${cluster}" "${pool}" "${image}")
+ test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
+ test "${mirror_pos}" != "${last_mirror_pos}" && break
+ done
+
+ test "${mirror_pos}" = "${last_mirror_pos}" && return 1
+ last_mirror_pos="${mirror_pos}"
+
+ # handle the case where the mirror is ahead of the master
+ master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
+ mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
+ master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
+ mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
+ test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
+ done
+ return 1
+}
+
+mirror_image_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster "${cluster}" mirror image snapshot "${pool}/${image}"
+}
+
+get_newest_mirror_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local log=$4
+
+ rbd --cluster "${cluster}" snap list --all "${pool}/${image}" --format xml | \
+        ${XMLSTARLET} sel -t -c "//snapshots/snapshot[namespace/complete='true' and position()=last()]" > \
+ ${log} || true
+}
+
+wait_for_snapshot_sync_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.status)
+ local local_status_log=${TEMPDIR}/$(mkfname ${local_cluster}-${pool}-${image}.status)
+
+ mirror_image_snapshot "${cluster}" "${pool}" "${image}"
+ get_newest_mirror_snapshot "${cluster}" "${pool}" "${image}" "${status_log}"
+    local snapshot_id=$(${XMLSTARLET} sel -t -v "//snapshot/id" < ${status_log})
+
+    for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
+        sleep ${s}
+
+        get_newest_mirror_snapshot "${local_cluster}" "${pool}" "${image}" "${local_status_log}"
+        local primary_snapshot_id=$(${XMLSTARLET} sel -t -v "//snapshot/namespace/primary_snap_id" < ${local_status_log})
+
+        test "${snapshot_id}" = "${primary_snapshot_id}" && return 0
+    done
+    return 1
+}
+
+wait_for_replay_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+
+ if [ "${MIRROR_IMAGE_MODE}" = "journal" ]; then
+ wait_for_journal_replay_complete ${local_cluster} ${cluster} ${pool} ${image}
+ elif [ "${MIRROR_IMAGE_MODE}" = "snapshot" ]; then
+ wait_for_snapshot_sync_complete ${local_cluster} ${cluster} ${pool} ${image}
+ else
+ return 1
+ fi
+}
+
+test_status_in_pool_dir()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local description_pattern="$5"
+ local service_pattern="$6"
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.mirror_status)
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror image status ${pool}/${image} |
+ tee ${status_log} >&2
+ grep "^ state: .*${state_pattern}" ${status_log} || return 1
+ grep "^ description: .*${description_pattern}" ${status_log} || return 1
+
+ if [ -n "${service_pattern}" ]; then
+ grep "service: *${service_pattern}" ${status_log} || return 1
+ elif echo ${state_pattern} | grep '^up+'; then
+ grep "service: *${MIRROR_USER_ID_PREFIX}.* on " ${status_log} || return 1
+ else
+ grep "service: " ${status_log} && return 1
+ fi
+
+ # recheck using `mirror pool status` command to stress test it.
+
+ local last_update="$(sed -nEe 's/^ last_update: *(.*) *$/\1/p' ${status_log})"
+ test_mirror_pool_status_verbose \
+ ${cluster} ${pool} ${image} "${state_pattern}" "${last_update}" &&
+ return 0
+
+ echo "'mirror pool status' test failed" >&2
+ exit 1
+}
+
+test_mirror_pool_status_verbose()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local prev_last_update="$5"
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}.mirror_status)
+
+ rbd --cluster ${cluster} mirror pool status ${pool} --verbose --format xml \
+ > ${status_log}
+
+ local last_update state
+ last_update=$($XMLSTARLET sel -t -v \
+ "//images/image[name='${image}']/last_update" < ${status_log})
+ state=$($XMLSTARLET sel -t -v \
+ "//images/image[name='${image}']/state" < ${status_log})
+
+ echo "${state}" | grep "${state_pattern}" ||
+ test "${last_update}" '>' "${prev_last_update}"
+}
+
+wait_for_status_in_pool_dir()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local description_pattern="$5"
+ local service_pattern="$6"
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ test_status_in_pool_dir ${cluster} ${pool} ${image} "${state_pattern}" \
+ "${description_pattern}" "${service_pattern}" &&
+ return 0
+ done
+ return 1
+}
+
+create_image()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local image=$1 ; shift
+ local size=128
+
+ if [ -n "$1" ]; then
+ size=$1
+ shift
+ fi
+
+ rbd --cluster ${cluster} create --size ${size} \
+ --image-feature "${RBD_IMAGE_FEATURES}" $@ ${pool}/${image}
+}
+
+create_image_and_enable_mirror()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local image=$1 ; shift
+ local mode=${1:-${MIRROR_IMAGE_MODE}}
+ if [ -n "$1" ]; then
+ shift
+ fi
+
+ create_image ${cluster} ${pool} ${image} $@
+ if [ "${MIRROR_POOL_MODE}" = "image" ] || [ "$pool" = "${PARENT_POOL}" ]; then
+ enable_mirror ${cluster} ${pool} ${image} ${mode}
+ fi
+}
+
+enable_journaling()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} feature enable ${pool}/${image} journaling
+}
+
+set_image_meta()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local key=$4
+ local val=$5
+
+ rbd --cluster ${cluster} image-meta set ${pool}/${image} $key $val
+}
+
+compare_image_meta()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local key=$4
+ local value=$5
+
+    test "$(rbd --cluster ${cluster} image-meta get ${pool}/${image} ${key})" = "${value}"
+}
+
+rename_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local new_name=$4
+
+ rbd --cluster=${cluster} rename ${pool}/${image} ${pool}/${new_name}
+}
+
+remove_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} snap purge ${pool}/${image}
+ rbd --cluster=${cluster} rm ${pool}/${image}
+}
+
+remove_image_retry()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ for s in 0 1 2 4 8 16 32; do
+ sleep ${s}
+ remove_image ${cluster} ${pool} ${image} && return 0
+ done
+ return 1
+}
+
+trash_move() {
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} trash move ${pool}/${image}
+}
+
+trash_restore() {
+ local cluster=$1
+ local pool=$2
+ local image_id=$3
+
+ rbd --cluster=${cluster} trash restore ${pool}/${image_id}
+}
+
+clone_image()
+{
+ local cluster=$1
+ local parent_pool=$2
+ local parent_image=$3
+ local parent_snap=$4
+ local clone_pool=$5
+ local clone_image=$6
+
+ shift 6
+
+ rbd --cluster ${cluster} clone \
+ ${parent_pool}/${parent_image}@${parent_snap} \
+ ${clone_pool}/${clone_image} --image-feature "${RBD_IMAGE_FEATURES}" $@
+}
+
+clone_image_and_enable_mirror()
+{
+ local cluster=$1
+ local parent_pool=$2
+ local parent_image=$3
+ local parent_snap=$4
+ local clone_pool=$5
+ local clone_image=$6
+ shift 6
+
+ local mode=${1:-${MIRROR_IMAGE_MODE}}
+ if [ -n "$1" ]; then
+ shift
+ fi
+
+ clone_image ${cluster} ${parent_pool} ${parent_image} ${parent_snap} ${clone_pool} ${clone_image} $@
+ enable_mirror ${cluster} ${clone_pool} ${clone_image} ${mode}
+}
+
+disconnect_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} journal client disconnect \
+ --image ${pool}/${image}
+}
+
+create_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap create ${pool}/${image}@${snap}
+}
+
+remove_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap rm ${pool}/${image}@${snap}
+}
+
+rename_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+ local new_snap=$5
+
+ rbd --cluster ${cluster} snap rename ${pool}/${image}@${snap} \
+ ${pool}/${image}@${new_snap}
+}
+
+purge_snapshots()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} snap purge ${pool}/${image}
+}
+
+protect_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap protect ${pool}/${image}@${snap}
+}
+
+unprotect_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap unprotect ${pool}/${image}@${snap}
+}
+
+unprotect_snapshot_retry()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ for s in 0 1 2 4 8 16 32; do
+ sleep ${s}
+ unprotect_snapshot ${cluster} ${pool} ${image} ${snap} && return 0
+ done
+ return 1
+}
+
+wait_for_snap_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ rbd --cluster ${cluster} info ${pool}/${image}@${snap_name} || continue
+ return 0
+ done
+ return 1
+}
+
+test_snap_moved_to_trash()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+
+ rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
+ grep -F " trash (${snap_name})"
+}
+
+wait_for_snap_moved_to_trash()
+{
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ test_snap_moved_to_trash $@ || continue
+ return 0
+ done
+ return 1
+}
+
+test_snap_removed_from_trash()
+{
+ test_snap_moved_to_trash $@ && return 1
+ return 0
+}
+
+wait_for_snap_removed_from_trash()
+{
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ test_snap_removed_from_trash $@ || continue
+ return 0
+ done
+ return 1
+}
+
+count_mirror_snaps()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
+ grep -c -F " mirror ("
+}
+
+write_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local count=$4
+ local size=$5
+
+ test -n "${size}" || size=4096
+
+ rbd --cluster ${cluster} bench ${pool}/${image} --io-type write \
+ --io-size ${size} --io-threads 1 --io-total $((size * count)) \
+ --io-pattern rand
+}
+
+stress_write_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')
+
+ set +e
+ timeout ${duration}s ceph_test_rbd_mirror_random_write \
+ --cluster ${cluster} ${pool} ${image} \
+ --debug-rbd=20 --debug-journaler=20 \
+ 2> ${TEMPDIR}/rbd-mirror-random-write.log
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 124 ]; then
+ return 0
+ fi
+ return 1
+}
+
+show_diff()
+{
+ local file1=$1
+ local file2=$2
+
+ xxd ${file1} > ${file1}.xxd
+ xxd ${file2} > ${file2}.xxd
+ sdiff -s ${file1}.xxd ${file2}.xxd | head -n 64
+ rm -f ${file1}.xxd ${file2}.xxd
+}
+
+compare_images()
+{
+ local pool=$1
+ local image=$2
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.export)
+ local loc_export=${TEMPDIR}/$(mkfname ${CLUSTER1}-${pool}-${image}.export)
+
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} export ${pool}/${image} ${rmt_export}
+ rbd --cluster ${CLUSTER1} export ${pool}/${image} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+compare_image_snapshots()
+{
+ local pool=$1
+ local image=$2
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
+ local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
+
+ for snap_name in $(rbd --cluster ${CLUSTER1} --format xml \
+ snap list ${pool}/${image} | \
+ $XMLSTARLET sel -t -v "//snapshot/name" | \
+ grep -E -v "^\.rbd-mirror\."); do
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} export ${pool}/${image}@${snap_name} ${rmt_export}
+ rbd --cluster ${CLUSTER1} export ${pool}/${image}@${snap_name} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ done
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+demote_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} mirror image demote ${pool}/${image}
+}
+
+promote_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local force=$4
+
+ rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
+}
+
+set_pool_mirror_mode()
+{
+ local cluster=$1
+ local pool=$2
+ local mode=${3:-${MIRROR_POOL_MODE}}
+
+ rbd --cluster=${cluster} mirror pool enable ${pool} ${mode}
+}
+
+disable_mirror()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} mirror image disable ${pool}/${image}
+}
+
+enable_mirror()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local mode=${4:-${MIRROR_IMAGE_MODE}}
+
+ rbd --cluster=${cluster} mirror image enable ${pool}/${image} ${mode}
+ # Display image info including the global image id for debugging purpose
+ rbd --cluster=${cluster} info ${pool}/${image}
+}
+
+test_image_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local test_state=$4
+ local image_id=$5
+ local current_state=deleted
+ local current_image_id
+
+ current_image_id=$(get_image_id ${cluster} ${pool} ${image})
+ test -n "${current_image_id}" &&
+ test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
+ current_state=present
+
+ test "${test_state}" = "${current_state}"
+}
+
+wait_for_image_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state=$4
+ local image_id=$5
+ local s
+
+ test -n "${image_id}" ||
+ image_id=$(get_image_id ${cluster} ${pool} ${image})
+
+ # TODO: add a way to force rbd-mirror to update replayers
+ for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
+ sleep ${s}
+ test_image_present \
+ "${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
+ return 0
+ done
+ return 1
+}
+
+get_image_id()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} info ${pool}/${image} |
+ sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
+}
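+# e.g. an "rbd info" line such as (hypothetical id)
+#   block_name_prefix: rbd_data.10ab6b8b4567
+# yields the image id "10ab6b8b4567".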
+
+request_resync_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local image_id_var_name=$4
+
+ eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
+ eval 'test -n "$'${image_id_var_name}'"'
+
+ rbd --cluster=${cluster} mirror image resync ${pool}/${image}
+}
+
+get_image_data_pool()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} info ${pool}/${image} |
+ awk '$1 == "data_pool:" {print $2}'
+}
+
+get_clone_format()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} info ${pool}/${image} |
+ awk 'BEGIN {
+ format = 1
+ }
+ $1 == "parent:" {
+ parent = $2
+ }
+ /op_features: .*clone-child/ {
+ format = 2
+ }
+ END {
+ if (!parent) exit 1
+ print format
+ }'
+}
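+# Clone format is inferred from "rbd info" output: a parent without the
+# "clone-child" op feature means a v1 clone (prints 1), an "op_features:
+# ... clone-child" line means a v2 clone (prints 2), and an image with no
+# parent at all is an error (exit status 1).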
+
+list_omap_keys()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+
+ rados --cluster ${cluster} -p ${pool} listomapkeys ${obj_name}
+}
+
+count_omap_keys_with_filter()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+ local filter=$4
+
+ list_omap_keys ${cluster} ${pool} ${obj_name} | grep -c ${filter}
+}
+
+wait_for_omap_keys()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+ local filter=$4
+
+ for s in 0 1 2 2 4 4 8 8 8 16 16 32; do
+ sleep $s
+
+ set +e
+ test "$(count_omap_keys_with_filter ${cluster} ${pool} ${obj_name} ${filter})" = 0
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 0 ]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+wait_for_image_in_omap()
+{
+ local cluster=$1
+ local pool=$2
+
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirroring status_global
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirroring image_
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirror_leader image_map
+}
+
+#
+# Main
+#
+
+if [ "$#" -gt 0 ]
+then
+ if [ -z "${RBD_MIRROR_TEMDIR}" ]
+ then
+ echo "RBD_MIRROR_TEMDIR is not set" >&2
+ exit 1
+ fi
+
+ TEMPDIR="${RBD_MIRROR_TEMDIR}"
+ cd ${TEMPDIR}
+ $@
+ exit $?
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_journal.sh b/qa/workunits/rbd/rbd_mirror_journal.sh
new file mode 100755
index 000000000..54f6aeec8
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_journal.sh
@@ -0,0 +1,614 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_journal.sh - test rbd-mirror daemon in journal-based mirroring mode
+#
+# This script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory used for cluster configs, daemon logs,
+# admin sockets and temporary files, and launches the rbd-mirror daemons.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: add image and test replay"
+start_mirrors ${CLUSTER1}
+image=test
+create_image ${CLUSTER2} ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
+fi
+compare_images ${POOL} ${image}
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
+
+testlog "TEST: stop mirror, add image, start mirror and test replay"
+stop_mirrors ${CLUSTER1}
+image1=test1
+create_image ${CLUSTER2} ${POOL} ${image1}
+write_image ${CLUSTER2} ${POOL} ${image1} 100
+start_mirrors ${CLUSTER1}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying' 'primary_position'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
+fi
+compare_images ${POOL} ${image1}
+
+testlog "TEST: test the first image is replaying after restart"
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: stop/start/restart mirror via admin socket"
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ flush ${CLUSTER1}
+ all_admin_daemons ${CLUSTER1} rbd mirror status
+fi
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image1}
+
+testlog "TEST: test image rename"
+new_name="${image}_RENAMED"
+rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
+admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: test trash move restore"
+image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
+trash_move ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+trash_restore ${CLUSTER2} ${POOL} ${image_id}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+testlog "TEST: failover and failback"
+start_mirrors ${CLUSTER2}
+
+# demote and promote same cluster
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+# failover (unmodified)
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+
+# failback (unmodified)
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+# failover
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+write_image ${CLUSTER1} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+# failback
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+testlog "TEST: failover / failback loop"
+for i in `seq 1 20`; do
+ demote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+ demote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+done
+
+testlog "TEST: force promote"
+force_promote_image=test_force_promote
+create_image ${CLUSTER2} ${POOL} ${force_promote_image}
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
+
+testlog "TEST: cloned images"
+testlog " - default"
+parent_image=test_parent
+parent_snap=snap
+create_image ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+clone_image=test_clone
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
+write_image ${CLUSTER2} ${POOL} ${clone_image} 100
+
+enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} journal
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying' 'primary_position'
+compare_images ${PARENT_POOL} ${parent_image}
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${clone_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
+
+testlog " - clone v1"
+clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}1
+
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v1 --rbd-default-clone-format 1
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
+unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2"
+parent_snap=snap_v2
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v2 --rbd-default-clone-format 2
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
+
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
+wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2 non-primary"
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v2 --rbd-default-clone-format 2
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+
+testlog "TEST: data pool"
+dp_image=test_data_pool
+create_image ${CLUSTER2} ${POOL} ${dp_image} 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${dp_image}@snap1
+compare_images ${POOL} ${dp_image}@snap2
+compare_images ${POOL} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
+
+testlog "TEST: disable mirroring / delete non-primary image"
+image2=test2
+image3=test3
+image4=test4
+image5=test5
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ create_image ${CLUSTER2} ${POOL} ${i}
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ fi
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+for i in ${image2} ${image4}; do
+ disable_mirror ${CLUSTER2} ${POOL} ${i}
+done
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
+for i in ${image3} ${image5}; do
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
+for i in ${image2} ${image4}; do
+ enable_journaling ${CLUSTER2} ${POOL} ${i}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${i}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${i}
+ compare_images ${POOL} ${i}
+done
+
+testlog "TEST: remove mirroring pool"
+pool=pool_to_remove
+for cluster in ${CLUSTER1} ${CLUSTER2}; do
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${pool} 16 16
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${pool}
+ rbd --cluster ${cluster} mirror pool enable ${pool} pool
+done
+peer_add ${CLUSTER1} ${pool} ${CLUSTER2}
+peer_add ${CLUSTER2} ${pool} ${CLUSTER1}
+rdp_image=test_remove_data_pool
+create_image ${CLUSTER2} ${pool} ${image} 128
+create_image ${CLUSTER2} ${POOL} ${rdp_image} 128 --data-pool ${pool}
+write_image ${CLUSTER2} ${pool} ${image} 100
+write_image ${CLUSTER2} ${POOL} ${rdp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${pool} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${pool} ${image} 'up+replaying' 'primary_position'
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${rdp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${rdp_image} 'up+replaying' 'primary_position'
+for cluster in ${CLUSTER1} ${CLUSTER2}; do
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool rm ${pool} ${pool} --yes-i-really-really-mean-it
+done
+remove_image_retry ${CLUSTER2} ${POOL} ${rdp_image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${rdp_image} 'deleted'
+for i in 0 1 2 4 8 8 8 8 16 16; do
+ sleep $i
+ admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} || break
+done
+admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} && false
+
+testlog "TEST: snapshot rename"
+snap_name='snap_rename'
+create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
+for i in `seq 1 20`; do
+ rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
+done
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
+for i in ${image2} ${image4}; do
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+testlog "TEST: disable mirror while daemon is stopped"
+stop_mirrors ${CLUSTER1}
+stop_mirrors ${CLUSTER2}
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+disable_mirror ${CLUSTER2} ${POOL} ${image}
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+fi
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
+enable_journaling ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: non-default namespace image mirroring"
+testlog " - replay"
+create_image ${CLUSTER2} ${POOL}/${NS1} ${image}
+create_image ${CLUSTER2} ${POOL}/${NS2} ${image}
+enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} journal
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
+write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
+write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL}/${NS1} ${image}
+compare_images ${POOL}/${NS2} ${image}
+
+testlog " - disable mirroring / delete image"
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
+disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
+remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
+
+testlog " - data pool"
+dp_image=test_data_pool
+create_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' 'primary_position'
+compare_images ${POOL}/${NS1} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+
+testlog "TEST: simple image resync"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+testlog "TEST: image resync while replayer is stopped"
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+ compare_images ${POOL} ${image}
+fi
+
+testlog "TEST: request image resync while daemon is offline"
+stop_mirrors ${CLUSTER1}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: client disconnect"
+image=laggy
+create_image ${CLUSTER2} ${POOL} ${image} 128 --journal-object-size 64K
+write_image ${CLUSTER2} ${POOL} ${image} 10
+
+testlog " - replay stopped after disconnect"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+
+testlog " - replay started after resync requested"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+
+testlog " - disconnected after max_concurrent_object_sets reached"
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+ set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_journal_max_concurrent_object_sets 1
+ write_image ${CLUSTER2} ${POOL} ${image} 20 16384
+ write_image ${CLUSTER2} ${POOL} ${image} 20 16384
+ test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+ set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_journal_max_concurrent_object_sets 0
+
+ testlog " - replay is still stopped (disconnected) after restart"
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+fi
+
+testlog " - replay started after resync requested"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+
+testlog " - rbd_mirroring_resync_after_disconnect config option"
+set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_mirroring_resync_after_disconnect true
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+image_id=$(get_image_id ${CLUSTER1} ${POOL} ${image})
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_mirroring_resync_after_disconnect false
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: split-brain"
+image=split-brain
+create_image ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+promote_image ${CLUSTER1} ${POOL} ${image} --force
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${image} 10
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed"
+start_mirrors ${CLUSTER2}
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ # teuthology will trash the daemon
+ testlog "TEST: no blocklists"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_snapshot.sh b/qa/workunits/rbd/rbd_mirror_snapshot.sh
new file mode 100755
index 000000000..c70d48b09
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_snapshot.sh
@@ -0,0 +1,517 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_snapshot.sh - test rbd-mirror daemon in snapshot-based mirroring mode
+#
+# The script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory used for cluster configs, daemon logs,
+# admin sockets, and temporary files, and launches the rbd-mirror daemon.
+#
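+# A minimal invocation sketch (the relative path and the RBD_MIRROR_TEMDIR
+# variable are assumptions; only RBD_MIRROR_USE_RBD_MIRROR is actually read
+# below):
+#
+#   RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
+#       ../qa/workunits/rbd/rbd_mirror_snapshot.sh
+#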
+
+MIRROR_POOL_MODE=image
+MIRROR_IMAGE_MODE=snapshot
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: add image and test replay"
+start_mirrors ${CLUSTER1}
+image=test
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
+fi
+compare_images ${POOL} ${image}
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
+
+testlog "TEST: stop mirror, add image, start mirror and test replay"
+stop_mirrors ${CLUSTER1}
+image1=test1
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image1}
+write_image ${CLUSTER2} ${POOL} ${image1} 100
+start_mirrors ${CLUSTER1}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
+fi
+compare_images ${POOL} ${image1}
+
+testlog "TEST: test the first image is replaying after restart"
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: stop/start/restart mirror via admin socket"
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ flush ${CLUSTER1}
+ all_admin_daemons ${CLUSTER1} rbd mirror status
+fi
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image1}
+
+testlog "TEST: test image rename"
+new_name="${image}_RENAMED"
+rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
+admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: test trash move restore"
+image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
+trash_move ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+trash_restore ${CLUSTER2} ${POOL} ${image_id}
+enable_mirror ${CLUSTER2} ${POOL} ${image} snapshot
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+testlog "TEST: failover and failback"
+start_mirrors ${CLUSTER2}
+
+# demote and promote same cluster
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+# failover (unmodified)
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+
+# failback (unmodified)
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+# failover
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+write_image ${CLUSTER1} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+# failback
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+testlog "TEST: failover / failback loop"
+for i in `seq 1 20`; do
+ demote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+ demote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+done
+# check that demote (and other) mirror snapshots don't pile up
+test "$(count_mirror_snaps ${CLUSTER1} ${POOL} ${image})" -le 3
+test "$(count_mirror_snaps ${CLUSTER2} ${POOL} ${image})" -le 3
+
+testlog "TEST: force promote"
+force_promote_image=test_force_promote
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${force_promote_image}
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
+
+testlog "TEST: cloned images"
+testlog " - default"
+parent_image=test_parent
+parent_snap=snap
+create_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+clone_image=test_clone
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
+write_image ${CLUSTER2} ${POOL} ${clone_image} 100
+enable_mirror ${CLUSTER2} ${POOL} ${clone_image} snapshot
+
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying'
+compare_images ${PARENT_POOL} ${parent_image}
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying'
+compare_images ${POOL} ${clone_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
+
+testlog " - clone v1"
+clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}1
+
+clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v1 snapshot --rbd-default-clone-format 1
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
+unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2"
+parent_snap=snap_v2
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
+
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
+wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2 non-primary"
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+
+testlog "TEST: data pool"
+dp_image=test_data_pool
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying'
+compare_images ${POOL} ${dp_image}@snap1
+compare_images ${POOL} ${dp_image}@snap2
+compare_images ${POOL} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
+
+testlog "TEST: disable mirroring / delete non-primary image"
+image2=test2
+image3=test3
+image4=test4
+image5=test5
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${i}
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ fi
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ mirror_image_snapshot ${CLUSTER2} ${POOL} ${i}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+for i in ${image2} ${image4}; do
+ disable_mirror ${CLUSTER2} ${POOL} ${i}
+done
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
+for i in ${image3} ${image5}; do
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
+done
+
+testlog "TEST: snapshot rename"
+snap_name='snap_rename'
+enable_mirror ${CLUSTER2} ${POOL} ${image2}
+create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
+for i in `seq 1 20`; do
+ rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
+done
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${image2}
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
+for i in ${image2} ${image4}; do
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+testlog "TEST: disable mirror while daemon is stopped"
+stop_mirrors ${CLUSTER1}
+stop_mirrors ${CLUSTER2}
+disable_mirror ${CLUSTER2} ${POOL} ${image}
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+fi
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: non-default namespace image mirroring"
+testlog " - replay"
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${image}
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
+write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
+write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying'
+compare_images ${POOL}/${NS1} ${image}
+compare_images ${POOL}/${NS2} ${image}
+
+testlog " - disable mirroring / delete image"
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
+disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
+remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
+
+testlog " - data pool"
+dp_image=test_data_pool
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying'
+compare_images ${POOL}/${NS1} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+
+testlog "TEST: simple image resync"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: image resync while replayer is stopped"
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ compare_images ${POOL} ${image}
+fi
+
+testlog "TEST: request image resync while daemon is offline"
+stop_mirrors ${CLUSTER1}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: split-brain"
+image=split-brain
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+promote_image ${CLUSTER1} ${POOL} ${image} --force
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${image} 10
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed"
+start_mirrors ${CLUSTER2}
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ # teuthology will trash the daemon
+ testlog "TEST: no blocklists"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_stress.sh b/qa/workunits/rbd/rbd_mirror_stress.sh
new file mode 100755
index 000000000..cb79aba7e
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_stress.sh
@@ -0,0 +1,221 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_stress.sh - stress test rbd-mirror daemon
+#
+# The following additional environment variables affect the test:
+#
+# RBD_MIRROR_REDUCE_WRITES - if not empty, don't run the stress bench write
+#                            tool during the many-images test
+#
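+# For example (illustrative invocation; the script path is an assumption):
+#
+#   RBD_MIRROR_REDUCE_WRITES=1 ../qa/workunits/rbd/rbd_mirror_stress.sh
+#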
+
+IMAGE_COUNT=50
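+# lockdep instrumentation is expensive; presumably disabled here to keep the
+# stress I/O fast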
+export LOCKDEP=0
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+create_snap()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+
+ rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap_name} \
+ --debug-rbd=20 --debug-journaler=20 2> ${TEMPDIR}/rbd-snap-create.log
+}
+
+compare_image_snaps()
+{
+ local pool=$1
+ local image=$2
+ local snap_name=$3
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
+ local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
+
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export}
+ rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+wait_for_pool_images()
+{
+ local cluster=$1
+ local pool=$2
+ local image_count=$3
+ local s
+ local count
+ local last_count=0
+
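+    # retry for up to ~20 minutes (40 x 30s), but restart the timeout whenever
+    # the image count changes so steady forward progress is never aborted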
+ while true; do
+ for s in `seq 1 40`; do
+ test $s -ne 1 && sleep 30
+ count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
+ test "${count}" = "${image_count}" && return 0
+
+ # reset timeout if making forward progress
+ test $count -ne $last_count && break
+ done
+
+ test $count -eq $last_count && break
+ last_count=$count
+ done
+ rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
+ return 1
+}
+
+wait_for_pool_healthy()
+{
+ local cluster=$1
+ local pool=$2
+ local s
+ local state
+
+ for s in `seq 1 40`; do
+ test $s -ne 1 && sleep 30
+ state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'image health:' | cut -d' ' -f 3)
+ test "${state}" = "ERROR" && break
+ test "${state}" = "OK" && return 0
+ done
+ rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
+ return 1
+}
+
+start_mirrors ${CLUSTER1}
+start_mirrors ${CLUSTER2}
+
+testlog "TEST: add image and test replay after client crashes"
+image=test
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '512M'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+clean_snap_name=
+for i in `seq 1 10`
+do
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ snap_name="snap${i}"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
+
+ if [ -n "${clean_snap_name}" ]; then
+ compare_image_snaps ${POOL} ${image} ${clean_snap_name}
+ fi
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+
+ clean_snap_name="snap${i}-clean"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${clean_snap_name}
+done
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${clean_snap_name}
+
+for i in `seq 1 10`
+do
+ snap_name="snap${i}"
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+
+ snap_name="snap${i}-clean"
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+done
+
+for i in `seq 1 10`
+do
+ snap_name="snap${i}"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+
+ snap_name="snap${i}-clean"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+done
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+
+testlog "TEST: create many images"
+snap_name="snap"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '128M'
+ if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then
+ write_image ${CLUSTER2} ${POOL} ${image} 100
+ else
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+ fi
+done
+
+wait_for_pool_images ${CLUSTER2} ${POOL} ${IMAGE_COUNT}
+wait_for_pool_healthy ${CLUSTER2} ${POOL}
+
+wait_for_pool_images ${CLUSTER1} ${POOL} ${IMAGE_COUNT}
+wait_for_pool_healthy ${CLUSTER1} ${POOL}
+
+testlog "TEST: compare many images"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+done
+
+testlog "TEST: delete many images"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ remove_image_retry ${CLUSTER2} ${POOL} ${image}
+done
+
+testlog "TEST: image deletions should propagate"
+wait_for_pool_images ${CLUSTER1} ${POOL} 0
+wait_for_pool_healthy ${CLUSTER1} ${POOL}
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+done
+
+testlog "TEST: delete images during bootstrap"
+set_pool_mirror_mode ${CLUSTER1} ${POOL} 'image'
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+
+start_mirror ${CLUSTER1}
+image=test
+
+for i in `seq 1 10`
+do
+ image="image_${i}"
+ create_image ${CLUSTER2} ${POOL} ${image} '512M'
+ enable_mirror ${CLUSTER2} ${POOL} ${image}
+
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+
+ disable_mirror ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+ purge_snapshots ${CLUSTER2} ${POOL} ${image}
+ remove_image_retry ${CLUSTER2} ${POOL} ${image}
+done
+
+testlog "TEST: check if removed images' OMAP are removed"
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
diff --git a/qa/workunits/rbd/rbd_support_module_recovery.sh b/qa/workunits/rbd/rbd_support_module_recovery.sh
new file mode 100755
index 000000000..e9defced2
--- /dev/null
+++ b/qa/workunits/rbd/rbd_support_module_recovery.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+set -ex
+
+POOL=rbd
+IMAGE_PREFIX=image
+NUM_IMAGES=20
+RUN_TIME=3600
+
+rbd mirror pool enable ${POOL} image
+rbd mirror pool peer add ${POOL} dummy
+
+# Create images and schedule their mirror snapshots
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ rbd create -s 1G --image-feature exclusive-lock ${POOL}/${IMAGE_PREFIX}$i
+ rbd mirror image enable ${POOL}/${IMAGE_PREFIX}$i snapshot
+ rbd mirror snapshot schedule add -p ${POOL} --image ${IMAGE_PREFIX}$i 1m
+done
+
+# Run fio workloads on images via kclient
+# Test the recovery of the rbd_support module and its scheduler from their
+# librbd client being blocklisted while an exclusive lock gets passed around
+# between their librbd client and a kclient trying to take mirror snapshots
+# and perform I/O on the same image.
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ DEVS[$i]=$(sudo rbd device map ${POOL}/${IMAGE_PREFIX}$i)
+ fio --name=fiotest --filename=${DEVS[$i]} --rw=randrw --bs=4K --direct=1 \
+ --ioengine=libaio --iodepth=2 --runtime=43200 --time_based \
+ &> /dev/null &
+done
+
+# Repeatedly blocklist the rbd_support module's client ~10s after the module
+# recovers from the previous blocklisting
+CURRENT_TIME=$(date +%s)
+END_TIME=$((CURRENT_TIME + RUN_TIME))
+PREV_CLIENT_ADDR=""
+CLIENT_ADDR=""
+while ((CURRENT_TIME <= END_TIME)); do
+ if [[ -n "${CLIENT_ADDR}" ]] &&
+ [[ "${CLIENT_ADDR}" != "${PREV_CLIENT_ADDR}" ]]; then
+ ceph osd blocklist add ${CLIENT_ADDR}
+ # Confirm rbd_support module's client is blocklisted
+ ceph osd blocklist ls | grep -q ${CLIENT_ADDR}
+ PREV_CLIENT_ADDR=${CLIENT_ADDR}
+ fi
+ sleep 10
+ CLIENT_ADDR=$(ceph mgr dump |
+ jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ CURRENT_TIME=$(date +%s)
+done
+
+# Confirm that the rbd_support module recovered from repeated blocklisting:
+# check that a mirror snapshot schedule can be added after a few retries
+for ((i = 1; i <= 24; i++)); do
+ rbd mirror snapshot schedule add -p ${POOL} \
+ --image ${IMAGE_PREFIX}1 2m && break
+ sleep 10
+done
+rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 |
+ grep 'every 2m'
+# Verify that the schedule that existed before the client blocklisting is preserved
+rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 |
+ grep 'every 1m'
+rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}1 2m
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}$i 1m
+done
+
+# cleanup
+killall fio || true
+wait
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ sudo rbd device unmap ${DEVS[$i]}
+done
+
+echo OK
diff --git a/qa/workunits/rbd/read-flags.sh b/qa/workunits/rbd/read-flags.sh
new file mode 100755
index 000000000..7d787ce67
--- /dev/null
+++ b/qa/workunits/rbd/read-flags.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -ex
+
+# create a snapshot, then export it and check that setting read flags works
+# by looking at --debug-ms output
+
+function clean_up {
+ rm -f test.log || true
+ rbd snap remove test@snap || true
+ rbd rm test || true
+}
+
+function test_read_flags {
+ local IMAGE=$1
+ local SET_BALANCED=$2
+ local SET_LOCALIZED=$3
+ local EXPECT_BALANCED=$4
+ local EXPECT_LOCALIZED=$5
+
+ local EXTRA_ARGS="--log-file test.log --debug-ms 1 --no-log-to-stderr"
+ if [ "$SET_BALANCED" = 'y' ]; then
+ EXTRA_ARGS="$EXTRA_ARGS --rbd-balance-snap-reads"
+ elif [ "$SET_LOCALIZED" = 'y' ]; then
+ EXTRA_ARGS="$EXTRA_ARGS --rbd-localize-snap-reads"
+ fi
+
+ rbd export $IMAGE - $EXTRA_ARGS > /dev/null
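+    # grep -L prints the file name only when the pattern is absent, so the
+    # else branches assert that the flag does NOT appear in the log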
+ if [ "$EXPECT_BALANCED" = 'y' ]; then
+ grep -q balance_reads test.log
+ else
+ grep -L balance_reads test.log | grep -q test.log
+ fi
+ if [ "$EXPECT_LOCALIZED" = 'y' ]; then
+ grep -q localize_reads test.log
+ else
+ grep -L localize_reads test.log | grep -q test.log
+ fi
+ rm -f test.log
+}
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+rbd create --image-feature layering -s 10 test
+rbd snap create test@snap
+
+# exporting from a non-snapshot, with or without the settings, should not set read flags
+test_read_flags test n n n n
+test_read_flags test y y n n
+
+# exporting from a snapshot should log the read flags when they are set
+test_read_flags test@snap n n n n
+test_read_flags test@snap y n y n
+test_read_flags test@snap n y n y
+
+# balance_reads happens to take priority over localize_reads
+test_read_flags test@snap y y y n
+
+echo OK
diff --git a/qa/workunits/rbd/simple_big.sh b/qa/workunits/rbd/simple_big.sh
new file mode 100755
index 000000000..70aafda4c
--- /dev/null
+++ b/qa/workunits/rbd/simple_big.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -ex
+
+mb=100000
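+# ~100 GiB image; the dd commands below write it fully and read it back in 1M blocks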
+
+rbd create foo --size $mb
+DEV=$(sudo rbd map foo)
+dd if=/dev/zero of=$DEV bs=1M count=$mb
+dd if=$DEV of=/dev/null bs=1M count=$mb
+sudo rbd unmap $DEV
+rbd rm foo
+
+echo OK
diff --git a/qa/workunits/rbd/test_admin_socket.sh b/qa/workunits/rbd/test_admin_socket.sh
new file mode 100755
index 000000000..6b960787b
--- /dev/null
+++ b/qa/workunits/rbd/test_admin_socket.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+set -ex
+
+TMPDIR=/tmp/rbd_test_admin_socket$$
+mkdir $TMPDIR
+trap "rm -fr $TMPDIR" 0
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function rbd_watch_out_file()
+{
+ echo ${TMPDIR}/rbd_watch_$1.out
+}
+
+function rbd_watch_pid_file()
+{
+ echo ${TMPDIR}/rbd_watch_$1.pid
+}
+
+function rbd_watch_fifo()
+{
+ echo ${TMPDIR}/rbd_watch_$1.fifo
+}
+
+function rbd_watch_asok()
+{
+ echo ${TMPDIR}/rbd_watch_$1.asok
+}
+
+function rbd_get_perfcounter()
+{
+ local image=$1
+ local counter=$2
+ local name
+
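+    # resolve the counter's full XPath from the perf schema, then pull its
+    # current value out of the perf dump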
+ name=$(ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) \
+ perf schema | $XMLSTARLET el -d3 |
+ grep "/librbd-.*-${image}/${counter}\$")
+ test -n "${name}" || return 1
+
+ ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) perf dump |
+ $XMLSTARLET sel -t -m "${name}" -v .
+}
+
+function rbd_check_perfcounter()
+{
+ local image=$1
+ local counter=$2
+ local expected_val=$3
+ local val=
+
+ val=$(rbd_get_perfcounter ${image} ${counter})
+
+ test "${val}" -eq "${expected_val}"
+}
+
+function rbd_watch_start()
+{
+ local image=$1
+ local asok=$(rbd_watch_asok ${image})
+
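+    # feed the watcher's stdin from a fifo so rbd_watch_end can later make it
+    # exit by writing a newline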
+ mkfifo $(rbd_watch_fifo ${image})
+ (cat $(rbd_watch_fifo ${image}) |
+ rbd --admin-socket ${asok} watch ${image} \
+ > $(rbd_watch_out_file ${image}) 2>&1)&
+
+ # find pid of the started rbd watch process
+ local pid
+ for i in `seq 10`; do
+ pid=$(ps auxww | awk "/[r]bd --admin.* watch ${image}/ {print \$2}")
+ test -n "${pid}" && break
+ sleep 0.1
+ done
+ test -n "${pid}"
+ echo ${pid} > $(rbd_watch_pid_file ${image})
+
+ # find watcher admin socket
+ test -n "${asok}"
+ for i in `seq 10`; do
+ test -S "${asok}" && break
+ sleep 0.1
+ done
+ test -S "${asok}"
+
+ # configure debug level
+ ceph --admin-daemon "${asok}" config set debug_rbd 20
+
+ # check that watcher is registered
+ rbd status ${image} | expect_false grep "Watchers: none"
+}
+
+function rbd_watch_end()
+{
+ local image=$1
+ local regexp=$2
+
+ # send 'enter' to watch to exit
+ echo > $(rbd_watch_fifo ${image})
+ # just in case it is not terminated
+ kill $(cat $(rbd_watch_pid_file ${image})) || :
+
+ # output rbd watch out file for easier troubleshooting
+ cat $(rbd_watch_out_file ${image})
+
+ # cleanup
+ rm -f $(rbd_watch_fifo ${image}) $(rbd_watch_pid_file ${image}) \
+ $(rbd_watch_out_file ${image}) $(rbd_watch_asok ${image})
+}
+
+pool="rbd"
+image=testimg$$
+ceph_admin="ceph --admin-daemon $(rbd_watch_asok ${image})"
+
+rbd create --size 128 ${pool}/${image}
+
+# check that the rbd cache commands are present in the help output
+rbd_cache_flush="rbd cache flush ${pool}/${image}"
+rbd_cache_invalidate="rbd cache invalidate ${pool}/${image}"
+
+rbd_watch_start ${image}
+${ceph_admin} help | fgrep "${rbd_cache_flush}"
+${ceph_admin} help | fgrep "${rbd_cache_invalidate}"
+rbd_watch_end ${image}
+
+# test rbd cache commands with disabled and enabled cache
+for conf_rbd_cache in false true; do
+
+ rbd image-meta set ${image} conf_rbd_cache ${conf_rbd_cache}
+
+ rbd_watch_start ${image}
+
+ rbd_check_perfcounter ${image} flush 0
+ ${ceph_admin} ${rbd_cache_flush}
+    # the 'flush' counter should increase regardless of whether the cache is enabled
+ rbd_check_perfcounter ${image} flush 1
+
+ rbd_check_perfcounter ${image} invalidate_cache 0
+ ${ceph_admin} ${rbd_cache_invalidate}
+    # the 'invalidate_cache' counter should increase regardless of whether the cache is enabled
+ rbd_check_perfcounter ${image} invalidate_cache 1
+
+ rbd_watch_end ${image}
+done
+
+rbd rm ${image}
diff --git a/qa/workunits/rbd/test_librbd.sh b/qa/workunits/rbd/test_librbd.sh
new file mode 100755
index 000000000..447306bb4
--- /dev/null
+++ b/qa/workunits/rbd/test_librbd.sh
@@ -0,0 +1,9 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_librbd
+else
+ ceph_test_librbd
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_librbd_python.sh b/qa/workunits/rbd/test_librbd_python.sh
new file mode 100755
index 000000000..a33100829
--- /dev/null
+++ b/qa/workunits/rbd/test_librbd_python.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/pybind
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --errors-for-leak-kinds=definite --error-exitcode=1 \
+ python3 -m pytest -v $relpath/test_rbd.py "$@"
+else
+ python3 -m pytest -v $relpath/test_rbd.py "$@"
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_lock_fence.sh b/qa/workunits/rbd/test_lock_fence.sh
new file mode 100755
index 000000000..7cf2d21c5
--- /dev/null
+++ b/qa/workunits/rbd/test_lock_fence.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# can't use -e because of background process
+set -x
+
+IMAGE=rbdrw-image
+LOCKID=rbdrw
+RELPATH=$(dirname $0)/../../../src/test/librbd
+RBDRW=$RELPATH/rbdrw.py
+
+rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
+
+# rbdrw loops doing I/O to $IMAGE after locking with lockid $LOCKID
+python3 $RBDRW $IMAGE $LOCKID &
+iochild=$!
+
+# give client time to lock and start reading/writing
+LOCKS='[]'
+while [ "$LOCKS" == '[]' ]
+do
+ LOCKS=$(rbd lock list $IMAGE --format json)
+ sleep 1
+done
+
+clientaddr=$(rbd lock list $IMAGE | tail -1 | awk '{print $NF;}')
+clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}')
+echo "clientaddr: $clientaddr"
+echo "clientid: $clientid"
+
+ceph osd blocklist add $clientaddr || exit 1
+
+wait $iochild
+rbdrw_exitcode=$?
+if [ $rbdrw_exitcode != 108 ]
+then
+ echo "wrong exitcode from rbdrw: $rbdrw_exitcode"
+ exit 1
+else
+ echo "rbdrw stopped with ESHUTDOWN"
+fi
+
+set -e
+ceph osd blocklist rm $clientaddr
+rbd lock remove $IMAGE $LOCKID "$clientid"
+# rbdrw will have exited with an existing watch, so, until #3527 is fixed,
+# hang out until the watch expires
+sleep 30
+rbd rm $IMAGE
+echo OK
diff --git a/qa/workunits/rbd/test_rbd_mirror.sh b/qa/workunits/rbd/test_rbd_mirror.sh
new file mode 100755
index 000000000..e139dd7e4
--- /dev/null
+++ b/qa/workunits/rbd/test_rbd_mirror.sh
@@ -0,0 +1,9 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_rbd_mirror
+else
+ ceph_test_rbd_mirror
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_rbd_tasks.sh b/qa/workunits/rbd/test_rbd_tasks.sh
new file mode 100755
index 000000000..b9663e601
--- /dev/null
+++ b/qa/workunits/rbd/test_rbd_tasks.sh
@@ -0,0 +1,276 @@
+#!/usr/bin/env bash
+set -ex
+
+POOL=rbd_tasks
+POOL_NS=ns1
+
+setup() {
+ trap 'cleanup' INT TERM EXIT
+
+ ceph osd pool create ${POOL} 128
+ rbd pool init ${POOL}
+ rbd namespace create ${POOL}/${POOL_NS}
+
+ TEMPDIR=`mktemp -d`
+}
+
+cleanup() {
+ ceph osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+
+ rm -rf ${TEMPDIR}
+}
+
+wait_for() {
+ local TEST_FN=$1
+ shift 1
+ local TEST_FN_ARGS=("$@")
+
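+    # poll with a capped backoff (~2 minutes in total) until TEST_FN succeeds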
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+
+ ${TEST_FN} "${TEST_FN_ARGS[@]}" || continue
+ return 0
+ done
+ return 1
+}
+
+task_exists() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ ceph rbd task list ${TASK_ID} || return 1
+ return 0
+}
+
+task_dne() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ ceph rbd task list ${TASK_ID} || return 0
+ return 1
+}
+
+task_in_progress() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ [[ $(ceph rbd task list ${TASK_ID} | jq '.in_progress') == 'true' ]]
+}
+
+test_remove() {
+ echo "test_remove"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE}
+
+ # MGR might require some time to discover the OSD map w/ new pool
+ wait_for ceph rbd task add remove ${POOL}/${IMAGE}
+}
+
+test_flatten() {
+ echo "test_flatten"
+
+ local PARENT_IMAGE=`uuidgen`
+ local CHILD_IMAGE=`uuidgen`
+
+ rbd create --size 1 --image-shared ${POOL}/${PARENT_IMAGE}
+ rbd snap create ${POOL}/${PARENT_IMAGE}@snap
+ rbd clone ${POOL}/${PARENT_IMAGE}@snap ${POOL}/${POOL_NS}/${CHILD_IMAGE} --rbd-default-clone-format=2
+ [[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "true" ]]
+
+ local TASK_ID=`ceph rbd task add flatten ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "false" ]]
+}
+
+test_trash_remove() {
+ echo "test_trash_remove"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE}
+ local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
+ rbd trash mv ${POOL}/${IMAGE}
+ [[ -n "$(rbd trash list ${POOL})" ]] || exit 1
+
+ local TASK_ID=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ -z "$(rbd trash list ${POOL})" ]] || exit 1
+}
+
+test_migration_execute() {
+ echo "test_migration_execute"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "executed" ]]
+}
+
+test_migration_commit() {
+ echo "test_migration_commit"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ TASK_ID=`ceph rbd task add migration commit ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq 'has("migration")')" == "false" ]]
+ (rbd info ${POOL}/${SOURCE_IMAGE} && return 1) || true
+ rbd info ${POOL}/${TARGET_IMAGE}
+}
+
+test_migration_abort() {
+ echo "test_migration_abort"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ TASK_ID=`ceph rbd task add migration abort ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${SOURCE_IMAGE} | jq 'has("migration")')" == "false" ]]
+ rbd info ${POOL}/${SOURCE_IMAGE}
+ rbd info ${POOL}/${TARGET_IMAGE} && return 1 || true  # target image must be gone
+}
+
+test_list() {
+ echo "test_list"
+
+ local IMAGE_1=`uuidgen`
+ local IMAGE_2=`uuidgen`
+
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_1}
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
+
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
+
+ local LIST_FILE="${TEMPDIR}/list_file"
+ ceph rbd task list > ${LIST_FILE}
+ cat ${LIST_FILE}
+
+ [[ $(jq "[.[] | .id] | contains([\"${TASK_ID_1}\", \"${TASK_ID_2}\"])" ${LIST_FILE}) == "true" ]]
+
+ ceph rbd task cancel ${TASK_ID_1}
+ ceph rbd task cancel ${TASK_ID_2}
+}
+
+test_cancel() {
+ echo "test_cancel"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE}
+ local TASK_ID=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ wait_for task_exists ${TASK_ID}
+
+ ceph rbd task cancel ${TASK_ID}
+ wait_for task_dne ${TASK_ID}
+}
+
+test_duplicate_task() {
+ echo "test_duplicate_task"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE}
+ local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
+ rbd trash mv ${POOL}/${IMAGE}
+
+ local TASK_ID_1=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+ local TASK_ID_2=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_1}" == "${TASK_ID_2}" ]]
+
+ ceph rbd task cancel ${TASK_ID_1}
+}
+
+test_duplicate_name() {
+ echo "test_duplicate_name"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1G --image-shared ${POOL}/${IMAGE}
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ wait_for task_dne ${TASK_ID_1}
+
+ rbd create --size 1G --image-shared ${POOL}/${IMAGE}
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_1}" != "${TASK_ID_2}" ]]
+ wait_for task_dne ${TASK_ID_2}
+
+ local TASK_ID_3=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_2}" == "${TASK_ID_3}" ]]
+}
+
+test_progress() {
+ echo "test_progress"
+
+ local IMAGE_1=`uuidgen`
+ local IMAGE_2=`uuidgen`
+
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE_1}
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
+
+ wait_for task_dne ${TASK_ID_1}
+
+ local PROGRESS_FILE="${TEMPDIR}/progress_file"
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.completed | .[].id] | contains([\"${TASK_ID_1}\"])" ${PROGRESS_FILE}) == "true" ]]
+
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
+
+ wait_for task_in_progress ${TASK_ID_2}
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.events | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
+
+ ceph rbd task cancel ${TASK_ID_2}
+ wait_for task_dne ${TASK_ID_2}
+
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.completed | map(select(.failed)) | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
+}
+
+setup
+test_remove
+test_flatten
+test_trash_remove
+test_migration_execute
+test_migration_commit
+test_migration_abort
+test_list
+test_cancel
+test_duplicate_task
+test_duplicate_name
+test_progress
+
+echo OK
diff --git a/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh b/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
new file mode 100755
index 000000000..501c69cd5
--- /dev/null
+++ b/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Regression test for http://tracker.ceph.com/issues/14984
+#
+# When the bug is present, starting the rbdmap service causes
+# a bogus message to be emitted to the log because the RBDMAPFILE
+# environment variable is not set.
+#
+# When the bug is not present, starting the rbdmap service will emit
+# no log messages, because /etc/ceph/rbdmap does not contain any lines
+# that require processing.
+#
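+# For reference, a populated /etc/ceph/rbdmap maps one image per line, e.g.
+# (hypothetical values):
+#
+#   poolname/imagename id=client,keyring=/etc/ceph/ceph.client.keyring
+#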
+set -ex
+
+echo "TEST: save timestamp for use later with journalctl --since"
+TIMESTAMP=$(date +%Y-%m-%d\ %H:%M:%S)
+
+echo "TEST: assert that rbdmap has not logged anything since boot"
+journalctl -b 0 -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+journalctl -b 0 -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+
+echo "TEST: restart the rbdmap.service"
+sudo systemctl restart rbdmap.service
+
+echo "TEST: ensure that /usr/bin/rbdmap runs to completion"
+until sudo systemctl status rbdmap.service | grep 'active (exited)' ; do
+ sleep 0.5
+done
+
+echo "TEST: assert that rbdmap has not logged anything since TIMESTAMP"
+journalctl --since "$TIMESTAMP" -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+journalctl --since "$TIMESTAMP" -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+
+exit 0
diff --git a/qa/workunits/rbd/verify_pool.sh b/qa/workunits/rbd/verify_pool.sh
new file mode 100755
index 000000000..08bcca506
--- /dev/null
+++ b/qa/workunits/rbd/verify_pool.sh
@@ -0,0 +1,27 @@
+#!/bin/sh -ex
+
+POOL_NAME=rbd_test_validate_pool
+PG_NUM=32
+
+tear_down () {
+ ceph osd pool delete $POOL_NAME $POOL_NAME --yes-i-really-really-mean-it || true
+}
+
+set_up () {
+ tear_down
+ ceph osd pool create $POOL_NAME $PG_NUM
+ ceph osd pool mksnap $POOL_NAME snap
+ rbd pool init $POOL_NAME
+}
+
+trap tear_down EXIT HUP INT
+set_up
+
+# creating an image in a pool-managed snapshot pool should fail
+rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true
+
+# should succeed if the pool is already marked as validated
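+# (rbd records pool validation in the pool's rbd_info object; writing
+# "overwrite validated" there simulates a pool that was already validated)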
+printf "overwrite validated" | rados --pool $POOL_NAME put rbd_info -
+rbd create --pool $POOL_NAME --size 1 foo
+
+echo OK
diff --git a/qa/workunits/rename/all.sh b/qa/workunits/rename/all.sh
new file mode 100755
index 000000000..f97ff773f
--- /dev/null
+++ b/qa/workunits/rename/all.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+set -ex
+
+dir=`dirname $0`
+
+CEPH_TOOL='./ceph'
+$CEPH_TOOL || CEPH_TOOL='ceph'
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/prepare.sh
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
+rm ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_nul.sh
+rm ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
+rm ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_pri.sh
+rm ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_rem.sh
+rm ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
+rm -r ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
+rm -r ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_pri.sh
+rm -r ./?/* || true
+
+CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_nul.sh
+rm -r ./?/* || true
+
diff --git a/qa/workunits/rename/dir_pri_nul.sh b/qa/workunits/rename/dir_pri_nul.sh
new file mode 100755
index 000000000..dd8106b63
--- /dev/null
+++ b/qa/workunits/rename/dir_pri_nul.sh
@@ -0,0 +1,28 @@
+#!/bin/sh -ex
+
+# dir: srcdn=destdn
+mkdir ./a/dir1
+mv ./a/dir1 ./a/dir1.renamed
+
+# dir: diff
+mkdir ./a/dir2
+mv ./a/dir2 ./b/dir2
+
+# dir: diff, child subtree on target
+mkdir -p ./a/dir3/child/foo
+$CEPH_TOOL mds tell 0 export_dir /a/dir3/child 1
+sleep 5
+mv ./a/dir3 ./b/dir3
+
+# dir: diff, child subtree on other
+mkdir -p ./a/dir4/child/foo
+$CEPH_TOOL mds tell 0 export_dir /a/dir4/child 2
+sleep 5
+mv ./a/dir4 ./b/dir4
+
+# dir: witness subtree adjustment
+mkdir -p ./a/dir5/1/2/3/4
+$CEPH_TOOL mds tell 0 export_dir /a/dir5/1/2/3 2
+sleep 5
+mv ./a/dir5 ./b
+
diff --git a/qa/workunits/rename/dir_pri_pri.sh b/qa/workunits/rename/dir_pri_pri.sh
new file mode 100755
index 000000000..de235fcd3
--- /dev/null
+++ b/qa/workunits/rename/dir_pri_pri.sh
@@ -0,0 +1,11 @@
+#!/bin/sh -ex
+
+# dir, srcdn=destdn
+mkdir ./a/dir1
+mkdir ./a/dir2
+mv -T ./a/dir1 ./a/dir2
+
+# dir, different
+mkdir ./a/dir3
+mkdir ./b/dir4
+mv -T ./a/dir3 ./b/dir4
diff --git a/qa/workunits/rename/plan.txt b/qa/workunits/rename/plan.txt
new file mode 100644
index 000000000..b423b4140
--- /dev/null
+++ b/qa/workunits/rename/plan.txt
@@ -0,0 +1,111 @@
+#!/bin/sh
+
+# srcdn destdn targeti
+
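+# Informal legend, inferred from the accompanying rename scripts: pri is a
+# primary dentry (the inode lives at the dentry), rem is a remote dentry (a
+# hard link to an inode stored elsewhere), and nul means no existing target;
+# srcdn/destdn are the source/destination dentries and srci/desti (targeti)
+# the corresponding inodes. auth/rep indicate whether the MDS is
+# authoritative for the item or merely holds a replica.
+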
+## pri auth null auth -
+## pri rep null auth -
+## rem auth null auth -
+## rem rep null auth -
+
+#/ pri auth null rep - dup of pr_na
+#/ pri rep null rep -
+#/ rem auth null rep - dup of rr_na
+#/ rem rep null rep -
+
+
+## pri auth pri auth -
+# pri rep pri auth -
+## rem auth pri auth -
+# rem rep pri auth -
+
+# pri auth pri rep -
+# pri rep pri rep -
+# rem auth pri rep -
+# rem rep pri rep -
+
+## pri auth rem auth auth
+# pri rep rem auth auth
+## rem auth rem auth auth
+# rem rep rem auth auth
+
+# pri auth rem rep auth
+# pri rep rem rep auth
+# rem auth rem rep auth
+# rem rep rem rep auth
+
+# pri auth rem auth rep
+# pri rep rem auth rep
+# rem auth rem auth rep
+# rem rep rem auth rep
+
+# pri auth rem rep rep
+# pri rep rem rep rep
+# rem auth rem rep rep
+# rem rep rem rep rep
+
+
+types of operations
+
+pri nul
+ srcdn=destdn
+ diff
+
+rem nul
+ srci=srcdn=destdn
+ srci=srcdn
+ srcdn=destdn
+ srci=destdn
+ all different
+
+pri pri
+ srcdn=destdn
+ different
+
+rem pri
+ srci=srcdn=destdn
+ srci=srcdn
+ srcdn=destdn
+ srci=destdn
+ all different
+
+pri rem
+ srcdn=destdn=desti
+ srcdn=destdn
+ destdn=desti
+ srcdn=desti
+ all different
+
+rem rem
+ srci=srcdn=destdn=desti
+ srci=srcdn=destdn
+ srci=srcdn=desti
+ srci=destdn=desti
+ srcdn=destdn=desti
+ srci=srcdn destdn=desti
+ srci=destdn srcdn=desti
+ srci=desti srcdn=destdn
+ srci=srcdn
+ srci=destdn
+ srci=desti
+ srcdn=destdn
+ srcdn=desti
+ destdn=desti
+ all different
+
+
+
+
+
+
+
+
+
+p n same
+r n same
+p n diff
+r n diff
+
+p p same
+r p same
+
+p r
diff --git a/qa/workunits/rename/prepare.sh b/qa/workunits/rename/prepare.sh
new file mode 100755
index 000000000..b5ba4ae58
--- /dev/null
+++ b/qa/workunits/rename/prepare.sh
@@ -0,0 +1,21 @@
+#!/bin/sh -ex
+
+$CEPH_TOOL mds tell 0 injectargs '--mds-bal-interval 0'
+$CEPH_TOOL mds tell 1 injectargs '--mds-bal-interval 0'
+$CEPH_TOOL mds tell 2 injectargs '--mds-bal-interval 0'
+$CEPH_TOOL mds tell 3 injectargs '--mds-bal-interval 0'
+#$CEPH_TOOL mds tell 4 injectargs '--mds-bal-interval 0'
+
+mkdir -p ./a/a
+mkdir -p ./b/b
+mkdir -p ./c/c
+mkdir -p ./d/d
+
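+# Derive the CephFS-relative path of the current directory by stripping the
+# filesystem mount point (taken from df) from $PWD, so that export_dir is
+# given a path relative to the CephFS root.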
+mount_dir=`df . | grep -o " /.*" | grep -o "/.*"`
+cur_dir=`pwd`
+ceph_dir=${cur_dir##$mount_dir}
+$CEPH_TOOL mds tell 0 export_dir $ceph_dir/b 1
+$CEPH_TOOL mds tell 0 export_dir $ceph_dir/c 2
+$CEPH_TOOL mds tell 0 export_dir $ceph_dir/d 3
+sleep 5
+
diff --git a/qa/workunits/rename/pri_nul.sh b/qa/workunits/rename/pri_nul.sh
new file mode 100755
index 000000000..c40ec1d25
--- /dev/null
+++ b/qa/workunits/rename/pri_nul.sh
@@ -0,0 +1,11 @@
+#!/bin/sh -ex
+
+# srcdn=destdn
+touch ./a/file1
+mv ./a/file1 ./a/file1.renamed
+
+# different
+touch ./a/file2
+mv ./a/file2 ./b
+
+
diff --git a/qa/workunits/rename/pri_pri.sh b/qa/workunits/rename/pri_pri.sh
new file mode 100755
index 000000000..b74985fe3
--- /dev/null
+++ b/qa/workunits/rename/pri_pri.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -ex
+
+# srcdn=destdn
+touch ./a/file1
+touch ./a/file2
+mv ./a/file1 ./a/file2
+
+# different (srcdn != destdn)
+touch ./a/file3
+touch ./b/file4
+mv ./a/file3 ./b/file4
+
diff --git a/qa/workunits/rename/pri_rem.sh b/qa/workunits/rename/pri_rem.sh
new file mode 100755
index 000000000..a1cd03d10
--- /dev/null
+++ b/qa/workunits/rename/pri_rem.sh
@@ -0,0 +1,31 @@
+#!/bin/sh -ex
+
+dotest() {
+ src=$1
+ desti=$2
+ destdn=$3
+ n=$4
+
+ touch ./$src/src$n
+ touch ./$desti/desti$n
+ ln ./$desti/desti$n ./$destdn/destdn$n
+
+ mv ./$src/src$n ./$destdn/destdn$n
+}
+
+
+# srcdn=destdn=desti
+dotest 'a' 'a' 'a' 1
+
+# destdn=desti
+dotest 'b' 'a' 'a' 2
+
+# srcdn=destdn
+dotest 'a' 'b' 'a' 3
+
+# srcdn=desti
+dotest 'a' 'a' 'b' 4
+
+# all different
+dotest 'a' 'b' 'c' 5
+
diff --git a/qa/workunits/rename/rem_nul.sh b/qa/workunits/rename/rem_nul.sh
new file mode 100755
index 000000000..a71033108
--- /dev/null
+++ b/qa/workunits/rename/rem_nul.sh
@@ -0,0 +1,29 @@
+#!/bin/sh -ex
+
+dotest() {
+ srci=$1
+ srcdn=$2
+ dest=$3
+ n=$4
+
+ touch ./$srci/srci$n
+ ln ./$srci/srci$n ./$srcdn/srcdn$n
+
+ mv ./$srcdn/srcdn$n ./$dest/dest$n
+}
+
+# srci=srcdn=destdn
+dotest 'a' 'a' 'a' 1
+
+# srcdn=destdn
+dotest 'b' 'a' 'a' 2
+
+# srci=destdn
+dotest 'a' 'b' 'a' 3
+
+# srci=srcdn
+dotest 'a' 'a' 'b' 4
+
+# all different
+dotest 'a' 'b' 'c' 5
+
diff --git a/qa/workunits/rename/rem_pri.sh b/qa/workunits/rename/rem_pri.sh
new file mode 100755
index 000000000..501ac5e1a
--- /dev/null
+++ b/qa/workunits/rename/rem_pri.sh
@@ -0,0 +1,29 @@
+#!/bin/sh -ex
+
+dotest() {
+ srci=$1
+ srcdn=$2
+ dest=$3
+ n=$4
+
+ touch ./$srci/srci$n
+ ln ./$srci/srci$n ./$srcdn/srcdn$n
+ touch ./$dest/dest$n
+
+ mv ./$srcdn/srcdn$n ./$dest/dest$n
+}
+
+# srci=srcdn=destdn
+dotest 'a' 'a' 'a' 1
+
+# srcdn=destdn
+dotest 'b' 'a' 'a' 2
+
+# srci=destdn
+dotest 'a' 'b' 'a' 3
+
+# srci=srcdn
+dotest 'a' 'a' 'b' 4
+
+# all different
+dotest 'a' 'b' 'c' 5
diff --git a/qa/workunits/rename/rem_rem.sh b/qa/workunits/rename/rem_rem.sh
new file mode 100755
index 000000000..80028c517
--- /dev/null
+++ b/qa/workunits/rename/rem_rem.sh
@@ -0,0 +1,61 @@
+#!/bin/sh -ex
+
+dotest() {
+ srci=$1
+ srcdn=$2
+ desti=$3
+ destdn=$4
+ n=$5
+
+ touch ./$srci/srci$n
+ ln ./$srci/srci$n ./$srcdn/srcdn$n
+ touch ./$desti/desti$n
+ ln ./$desti/desti$n ./$destdn/destdn$n
+
+ mv ./$srcdn/srcdn$n ./$destdn/destdn$n
+}
+
+# srci=srcdn=destdn=desti
+dotest 'a' 'a' 'a' 'a' 1
+
+# srcdn=destdn=desti
+dotest 'b' 'a' 'a' 'a' 2
+
+# srci=destdn=desti
+dotest 'a' 'b' 'a' 'a' 3
+
+# srci=srcdn=destdn
+dotest 'a' 'a' 'b' 'a' 4
+
+# srci=srcdn=desti
+dotest 'a' 'a' 'a' 'b' 5
+
+# srci=srcdn destdn=desti
+dotest 'a' 'a' 'b' 'b' 6
+
+# srci=destdn srcdn=desti
+dotest 'a' 'b' 'b' 'a' 7
+
+# srci=desti srcdn=destdn
+dotest 'a' 'b' 'a' 'b' 8
+
+# srci=srcdn
+dotest 'a' 'a' 'b' 'c' 9
+
+# srci=desti
+dotest 'a' 'b' 'a' 'c' 10
+
+# srci=destdn
+dotest 'a' 'b' 'c' 'a' 11
+
+# srcdn=desti
+dotest 'a' 'b' 'b' 'c' 12
+
+# srcdn=destdn
+dotest 'a' 'b' 'c' 'b' 13
+
+# destdn=desti
+dotest 'a' 'b' 'c' 'c' 14
+
+# all different
+dotest 'a' 'b' 'c' 'd' 15
diff --git a/qa/workunits/rest/test-restful.sh b/qa/workunits/rest/test-restful.sh
new file mode 100755
index 000000000..fde0d107a
--- /dev/null
+++ b/qa/workunits/rest/test-restful.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -ex
+
+mydir=`dirname $0`
+
+secret=`ceph config-key get mgr/restful/keys/admin`
+url=$(ceph mgr dump|jq -r .services.restful|sed -e 's/\/$//')
+echo "url $url secret $secret"
+$mydir/test_mgr_rest_api.py $url $secret
+
+echo $0 OK
diff --git a/qa/workunits/rest/test_mgr_rest_api.py b/qa/workunits/rest/test_mgr_rest_api.py
new file mode 100755
index 000000000..74126ab78
--- /dev/null
+++ b/qa/workunits/rest/test_mgr_rest_api.py
@@ -0,0 +1,98 @@
+#! /usr/bin/env python3
+
+import requests
+import time
+import sys
+import json
+
+# Do not show the stupid message about verify=False. ignore exceptions bc
+# this doesn't work on some distros.
+try:
+ from requests.packages.urllib3.exceptions import InsecureRequestWarning
+ requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
+except:
+ pass
+
+if len(sys.argv) < 3:
+ print("Usage: %s <url> <admin_key>" % sys.argv[0])
+ sys.exit(1)
+
+addr = sys.argv[1]
+auth = ('admin', sys.argv[2])
+headers = {'Content-type': 'application/json'}
+
+request = None
+
+# Create a pool and get its id
+request = requests.post(
+ addr + '/pool?wait=yes',
+ data=json.dumps({'name': 'supertestfriends', 'pg_num': 128}),
+ headers=headers,
+ verify=False,
+ auth=auth)
+print(request.text)
+request = requests.get(addr + '/pool', verify=False, auth=auth)
+assert(request.json()[-1]['pool_name'] == 'supertestfriends')
+pool_id = request.json()[-1]['pool']
+
+# get a mon name
+request = requests.get(addr + '/mon', verify=False, auth=auth)
+firstmon = request.json()[0]['name']
+print('first mon is %s' % firstmon)
+
+# get a server name
+request = requests.get(addr + '/osd', verify=False, auth=auth)
+aserver = request.json()[0]['server']
+print('a server is %s' % aserver)
+
+
+screenplay = [
+ ('get', '/', {}),
+ ('get', '/config/cluster', {}),
+ ('get', '/crush/rule', {}),
+ ('get', '/doc', {}),
+ ('get', '/mon', {}),
+ ('get', '/mon/' + firstmon, {}),
+ ('get', '/osd', {}),
+ ('get', '/osd/0', {}),
+ ('get', '/osd/0/command', {}),
+ ('get', '/pool/1', {}),
+ ('get', '/server', {}),
+ ('get', '/server/' + aserver, {}),
+ ('post', '/osd/0/command', {'command': 'scrub'}),
+ ('post', '/pool?wait=1', {'name': 'supertestfriends', 'pg_num': 128}),
+ ('patch', '/osd/0', {'in': False}),
+ ('patch', '/config/osd', {'pause': True}),
+ ('get', '/config/osd', {}),
+ ('patch', '/pool/' + str(pool_id), {'size': 2}),
+ ('patch', '/config/osd', {'pause': False}),
+ ('patch', '/osd/0', {'in': True}),
+ ('get', '/pool', {}),
+ ('delete', '/pool/' + str(pool_id) + '?wait=1', {}),
+ ('get', '/request?page=0', {}),
+ ('delete', '/request', {}),
+ ('get', '/request', {}),
+ ('patch', '/pool/1', {'pg_num': 128}),
+ ('patch', '/pool/1', {'pgp_num': 128}),
+ ('get', '/perf?daemon=.*', {}),
+]
+
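+# Replay the screenplay in order; a ('sleep', <seconds>, {}) entry pauses
+# instead of issuing a request (none are present in the list above).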
+for method, endpoint, args in screenplay:
+ if method == 'sleep':
+ time.sleep(endpoint)
+ continue
+ url = addr + endpoint
+ print("URL = " + url)
+ request = getattr(requests, method)(
+ url,
+ data=json.dumps(args) if args else None,
+ headers=headers,
+ verify=False,
+ auth=auth)
+ assert request is not None
+ print(request.text)
+ if request.status_code != 200 or 'error' in request.json():
+ print('ERROR: %s request for URL "%s" failed' % (method, url))
+ sys.exit(1)
+
+print('OK')
diff --git a/qa/workunits/restart/test-backtraces.py b/qa/workunits/restart/test-backtraces.py
new file mode 100755
index 000000000..37ddef539
--- /dev/null
+++ b/qa/workunits/restart/test-backtraces.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+
+from __future__ import print_function
+
+import subprocess
+import json
+import os
+import time
+import sys
+
+import rados as rados
+import cephfs as cephfs
+
+prefix='testbt'
+
+def get_name(b, i, j):
+ c = '{pre}.{pid}.{i}.{j}'.format(pre=prefix, pid=os.getpid(), i=i, j=j)
+ return c, b + '/' + c
+
+def mkdir(ceph, d):
+ print("mkdir {d}".format(d=d), file=sys.stderr)
+ ceph.mkdir(d, 0o755)
+ return ceph.stat(d)['st_ino']
+
+def create(ceph, f):
+ print("creating {f}".format(f=f), file=sys.stderr)
+ fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
+ ceph.close(fd)
+ return ceph.stat(f)['st_ino']
+
+def set_mds_config_param(ceph, param):
+ with open('/dev/null', 'rb') as devnull:
+ confarg = ''
+ if conf != '':
+ confarg = '-c {c}'.format(c=conf)
+ r = subprocess.call("ceph {ca} mds tell a injectargs '{p}'".format(ca=confarg, p=param), shell=True, stdout=devnull)
+ if r != 0:
+ raise Exception
+
+
+class _TrimIndentFile(object):
+ def __init__(self, fp):
+ self.fp = fp
+
+ def readline(self):
+ line = self.fp.readline()
+ return line.lstrip(' \t')
+
+def _optionxform(s):
+ s = s.replace('_', ' ')
+ s = '_'.join(s.split())
+ return s
+
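+# The "restart mds a ..." lines printed below are directives to an external
+# test driver, which restarts the MDS and acknowledges by writing "restarted"
+# back on stdin.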
+def conf_set_kill_mds(location, killnum):
+ print('setting mds kill config option for {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
+ print("restart mds a mds_kill_{l}_at {k}".format(l=location, k=killnum))
+ sys.stdout.flush()
+ for l in sys.stdin:
+ if l.rstrip('\n') == 'restarted':
+ break
+
+def flush(ceph, testnum):
+ print('flushing {t}'.format(t=testnum), file=sys.stderr)
+ set_mds_config_param(ceph, '--mds_log_max_segments 1')
+
+ for i in range(1, 500):
+ f = '{p}.{pid}.{t}.{i}'.format(p=prefix, pid=os.getpid(), t=testnum, i=i)
+ print('flushing with create {f}'.format(f=f), file=sys.stderr)
+ fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
+ ceph.close(fd)
+ ceph.unlink(f)
+
+ print('flush doing shutdown', file=sys.stderr)
+ ceph.shutdown()
+ print('flush reinitializing ceph', file=sys.stderr)
+ ceph = cephfs.LibCephFS(conffile=conf)
+ print('flush doing mount', file=sys.stderr)
+ ceph.mount()
+ return ceph
+
+def kill_mds(ceph, location, killnum):
+ print('killing mds: {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
+ set_mds_config_param(ceph, '--mds_kill_{l}_at {k}'.format(l=location, k=killnum))
+
+def wait_for_mds(ceph):
+ # wait for restart
+ while True:
+ confarg = ''
+ if conf != '':
+ confarg = '-c {c}'.format(c=conf)
+ r = subprocess.check_output("ceph {ca} mds stat".format(ca=confarg), shell=True).decode()
+ if 'a=up:active' in r:
+ break
+ time.sleep(1)
+
+def decode(value):
+
+ tmpfile = '/tmp/{p}.{pid}'.format(p=prefix, pid=os.getpid())
+ with open(tmpfile, 'wb+') as f:  # the xattr value is bytes, so write in binary mode
+ f.write(value)
+
+ p = subprocess.Popen(
+ [
+ 'ceph-dencoder',
+ 'import',
+ tmpfile,
+ 'type',
+ 'inode_backtrace_t',
+ 'decode',
+ 'dump_json',
+ ],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ (stdout, _) = p.communicate(input=value)
+ p.stdin.close()
+ if p.returncode != 0:
+ raise Exception
+ os.remove(tmpfile)
+ return json.loads(stdout)
+
+class VerifyFailure(Exception):
+ pass
+
+def verify(rados_ioctx, ino, values, pool):
+ print('getting parent attr for ino: %lx.00000000' % ino, file=sys.stderr)
+ savede = None
+ for i in range(1, 20):
+ try:
+ savede = None
+ binbt = rados_ioctx.get_xattr('%lx.00000000' % ino, 'parent')
+ break  # got the backtrace, stop retrying
+ except rados.ObjectNotFound as e:
+ # wait for a bit to let segments get flushed out
+ savede = e
+ time.sleep(10)
+ if savede:
+ raise savede
+
+ bt = decode(binbt)
+
+ ind = 0
+ if bt['ino'] != ino:
+ raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
+ bi=bt['ino'], ino=ino, bt=bt, i=ino, v=values))
+ for (n, i) in values:
+ if bt['ancestors'][ind]['dirino'] != i:
+ raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
+ b=bt['ancestors'][ind]['dirino'], ind=i, bt=bt, i=ino, v=values))
+ if bt['ancestors'][ind]['dname'] != n:
+ raise VerifyFailure('ancestor dname mismatch: {b} != {n}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
+ b=bt['ancestors'][ind]['dname'], n=n, bt=bt, i=ino, v=values))
+ ind += 1
+
+ if bt['pool'] != pool:
+ raise VerifyFailure('pool mismatch: {btp} != {p}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
+ btp=bt['pool'], p=pool, bt=bt, i=ino, v=values))
+
+def make_abc(ceph, rooti, i):
+ expected_bt = []
+ c, d = get_name("/", i, 0)
+ expected_bt = [(c, rooti)] + expected_bt
+ di = mkdir(ceph, d)
+ c, d = get_name(d, i, 1)
+ expected_bt = [(c, di)] + expected_bt
+ di = mkdir(ceph, d)
+ c, f = get_name(d, i, 2)
+ fi = create(ceph, f)
+ expected_bt = [(c, di)] + expected_bt
+ return fi, expected_bt
+
+test = -1
+if len(sys.argv) > 1:
+ test = int(sys.argv[1])
+
+conf = ''
+if len(sys.argv) > 2:
+ conf = sys.argv[2]
+
+radosobj = rados.Rados(conffile=conf)
+radosobj.connect()
+ioctx = radosobj.open_ioctx('data')
+
+ceph = cephfs.LibCephFS(conffile=conf)
+ceph.mount()
+
+rooti = ceph.stat('/')['st_ino']
+
+# create /a/b/c
+# flush
+# verify
+
+i = 0
+if test < 0 or test == i:
+ print('Running test %d: basic verify' % i, file=sys.stderr)
+ ino, expected_bt = make_abc(ceph, rooti, i)
+ ceph = flush(ceph, i)
+ verify(ioctx, ino, expected_bt, 0)
+
+i += 1
+
+# kill-mds-at-openc-1
+# create /a/b/c
+# restart-mds
+# flush
+# verify
+
+if test < 0 or test == i:
+ print('Running test %d: kill openc' % i, file=sys.stderr)
+ print("restart mds a")
+ sys.stdout.flush()
+ kill_mds(ceph, 'openc', 1)
+ ino, expected_bt = make_abc(ceph, rooti, i)
+ ceph = flush(ceph, i)
+ verify(ioctx, ino, expected_bt, 0)
+
+i += 1
+
+# kill-mds-at-openc-1
+# create /a/b/c
+# restart-mds with kill-mds-at-replay-1
+# restart-mds
+# flush
+# verify
+if test < 0 or test == i:
+ print('Running test %d: kill openc/replay' % i, file=sys.stderr)
+ # these are reversed because we want to prepare the config
+ conf_set_kill_mds('journal_replay', 1)
+ kill_mds(ceph, 'openc', 1)
+ print("restart mds a")
+ sys.stdout.flush()
+ ino, expected_bt = make_abc(ceph, rooti, i)
+ ceph = flush(ceph, i)
+ verify(ioctx, ino, expected_bt, 0)
+
+i += 1
+
+ioctx.close()
+radosobj.shutdown()
+ceph.shutdown()
+
+print("done")
+sys.stdout.flush()
diff --git a/qa/workunits/rgw/common.py b/qa/workunits/rgw/common.py
new file mode 100755
index 000000000..2c9c5d035
--- /dev/null
+++ b/qa/workunits/rgw/common.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+import errno
+import subprocess
+import logging as log
+import boto3
+import botocore.exceptions
+import random
+import json
+from time import sleep
+
+log.basicConfig(format = '%(message)s', level=log.DEBUG)
+log.getLogger('botocore').setLevel(log.CRITICAL)
+log.getLogger('boto3').setLevel(log.CRITICAL)
+log.getLogger('urllib3').setLevel(log.CRITICAL)
+
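+# exec_cmd runs a shell command: by default it waits, asserts a zero exit
+# status and returns stdout; with check_retcode=False it returns
+# (stdout, returncode) instead, and with wait=False it returns immediately.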
+def exec_cmd(cmd, wait = True, **kwargs):
+ check_retcode = kwargs.pop('check_retcode', True)
+ kwargs['shell'] = True
+ kwargs['stdout'] = subprocess.PIPE
+ proc = subprocess.Popen(cmd, **kwargs)
+ log.info(proc.args)
+ if wait:
+ out, _ = proc.communicate()
+ if check_retcode:
+ assert(proc.returncode == 0)
+ return out
+ return (out, proc.returncode)
+ return ''
+
+def create_user(uid, display_name, access_key, secret_key):
+ _, ret = exec_cmd(f'radosgw-admin user create --uid {uid} --display-name "{display_name}" --access-key {access_key} --secret {secret_key}', check_retcode=False)
+ assert(ret == 0 or ret == errno.EEXIST)
+
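+# boto_connect returns a boto3 S3 resource, probing http on ports 80 and
+# 8000 (the vstart default) before falling back to https on port 443.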
+def boto_connect(access_key, secret_key, config=None):
+ def try_connect(portnum, ssl, proto):
+ endpoint = proto + '://localhost:' + portnum
+ conn = boto3.resource('s3',
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ use_ssl=ssl,
+ endpoint_url=endpoint,
+ verify=False,
+ config=config,
+ )
+ try:
+ list(conn.buckets.limit(1)) # just verify we can list buckets
+ except botocore.exceptions.ConnectionError as e:
+ print(e)
+ raise
+ print('connected to', endpoint)
+ return conn
+ try:
+ return try_connect('80', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ try: # retry on non-privileged http port
+ return try_connect('8000', False, 'http')
+ except botocore.exceptions.ConnectionError:
+ # retry with ssl
+ return try_connect('443', True, 'https')
+
+def put_objects(bucket, key_list):
+ objs = []
+ for key in key_list:
+ o = bucket.put_object(Key=key, Body=b"some_data")
+ objs.append((o.key, o.version_id))
+ return objs
+
+def create_unlinked_objects(conn, bucket, key_list):
+ # creates an unlinked/unlistable object for each key in key_list
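+ # It relies on the rgw_debug_inject_* config knobs to make the OLH update
+ # fail partway through, leaving a bucket-index instance entry with no
+ # listable link; each orphaned version is then located via
+ # 'radosgw-admin bi list' and matched by its random tag.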
+
+ object_versions = []
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ exec_cmd('ceph config set client rgw_debug_inject_olh_cancel_modification_err true')
+ sleep(1)
+ for key in key_list:
+ tag = str(random.randint(0, 1_000_000))
+ try:
+ bucket.put_object(Key=key, Body=b"some_data", Metadata = {
+ 'tag': tag,
+ })
+ except Exception as e:
+ log.debug(e)
+ out = exec_cmd(f'radosgw-admin bi list --bucket {bucket.name} --object {key}')
+ instance_entries = filter(
+ lambda x: x['type'] == 'instance',
+ json.loads(out.replace(b'\x80', b'0x80')))
+ found = False
+ for ie in instance_entries:
+ instance_id = ie['entry']['instance']
+ ov = conn.ObjectVersion(bucket.name, key, instance_id).head()
+ if ov['Metadata'] and ov['Metadata']['tag'] == tag:
+ object_versions.append((key, instance_id))
+ found = True
+ break
+ if not found:
+ raise Exception(f'failed to create unlinked object for key={key}')
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ exec_cmd('ceph config rm client rgw_debug_inject_olh_cancel_modification_err')
+ return object_versions
+
diff --git a/qa/workunits/rgw/keystone-fake-server.py b/qa/workunits/rgw/keystone-fake-server.py
new file mode 100755
index 000000000..c05ad7bfd
--- /dev/null
+++ b/qa/workunits/rgw/keystone-fake-server.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+from datetime import datetime, timedelta
+import logging
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+
+DEFAULT_DOMAIN = {
+ 'id': 'default',
+ 'name': 'Default',
+}
+
+
+PROJECTS = {
+ 'admin': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': 'a6944d763bf64ee6a275f1263fae0352',
+ 'name': 'admin',
+ },
+ 'deadbeef': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': 'b4221c214dd64ee6a464g2153fae3813',
+ 'name': 'deadbeef',
+ },
+}
+
+
+USERS = {
+ 'admin': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': '51cc68287d524c759f47c811e6463340',
+ 'name': 'admin',
+ },
+ 'deadbeef': {
+ 'domain': DEFAULT_DOMAIN,
+ 'id': '99gg485738df758349jf8d848g774392',
+ 'name': 'deadbeef',
+ },
+}
+
+
+USERROLES = {
+ 'admin': [
+ {
+ 'id': '51cc68287d524c759f47c811e6463340',
+ 'name': 'admin',
+ }
+ ],
+ 'deadbeef': [
+ {
+ 'id': '98bd32184f854f393a72b932g5334124',
+ 'name': 'Member',
+ }
+ ],
+}
+
+
+TOKENS = {
+ 'admin-token-1': {
+ 'username': 'admin',
+ 'project': 'admin',
+ 'expired': False,
+ },
+ 'user-token-1': {
+ 'username': 'deadbeef',
+ 'project': 'deadbeef',
+ 'expired': False,
+ },
+ 'user-token-2': {
+ 'username': 'deadbeef',
+ 'project': 'deadbeef',
+ 'expired': True,
+ },
+}
+
+
+def _generate_token_result(username, project, expired=False):
+ userdata = USERS[username]
+ projectdata = PROJECTS[project]
+ userroles = USERROLES[username]
+
+ if expired:
+ then = datetime.now() - timedelta(hours=2)
+ issued_at = then.strftime('%Y-%m-%dT%H:%M:%SZ')
+ expires_at = (then + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
+ else:
+ now = datetime.now()
+ issued_at = now.strftime('%Y-%m-%dT%H:%M:%SZ')
+ expires_at = (now + timedelta(seconds=10)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ result = {
+ 'token': {
+ 'audit_ids': ['3T2dc1CGQxyJsHdDu1xkcw'],
+ 'catalog': [],
+ 'expires_at': expires_at,
+ 'is_domain': False,
+ 'issued_at': issued_at,
+ 'methods': ['password'],
+ 'project': projectdata,
+ 'roles': userroles,
+ 'user': userdata,
+ }
+ }
+
+ return result
+
+
+COUNTERS = {
+ 'get_total': 0,
+ 'post_total': 0,
+}
+
+
+class HTTPRequestHandler(BaseHTTPRequestHandler):
+ def do_GET(self):
+ # This is not part of the Keystone API
+ if self.path == '/stats':
+ self._handle_stats()
+ return
+
+ if str(self.path).startswith('/v3/auth/tokens'):
+ self._handle_get_auth()
+ else:
+ self.send_response(403)
+ self.end_headers()
+
+ def do_POST(self):
+ if self.path == '/v3/auth/tokens':
+ self._handle_post_auth()
+ else:
+ self.send_response(400)
+ self.end_headers()
+
+ def _get_data(self):
+ length = int(self.headers.get('content-length'))
+ data = self.rfile.read(length).decode('utf8')
+ return json.loads(data)
+
+ def _set_data(self, data):
+ jdata = json.dumps(data)
+ self.wfile.write(jdata.encode('utf8'))
+
+ def _handle_stats(self):
+ self.send_response(200)
+ self.end_headers()
+ self._set_data(COUNTERS)
+
+ def _handle_get_auth(self):
+ logging.info('Increasing get_total counter from %d -> %d' % (COUNTERS['get_total'], COUNTERS['get_total']+1))
+ COUNTERS['get_total'] += 1
+ auth_token = self.headers.get('X-Subject-Token', None)
+ if auth_token and auth_token in TOKENS:
+ tokendata = TOKENS[auth_token]
+ if tokendata['expired'] and 'allow_expired=1' not in self.path:
+ self.send_response(404)
+ self.end_headers()
+ else:
+ self.send_response(200)
+ self.send_header('Content-Type', 'application/json')
+ self.end_headers()
+ result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
+ self._set_data(result)
+ else:
+ self.send_response(404)
+ self.end_headers()
+
+ def _handle_post_auth(self):
+ logging.info('Increasing post_total counter from %d -> %d' % (COUNTERS['post_total'], COUNTERS['post_total']+1))
+ COUNTERS['post_total'] += 1
+ data = self._get_data()
+ user = data['auth']['identity']['password']['user']
+ if user['name'] == 'admin' and user['password'] == 'ADMIN':
+ self.send_response(201)
+ self.send_header('Content-Type', 'application/json')
+ self.send_header('X-Subject-Token', 'admin-token-1')
+ self.end_headers()
+ tokendata = TOKENS['admin-token-1']
+ result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
+ self._set_data(result)
+ else:
+ self.send_response(401)
+ self.end_headers()
+
+
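+# A quick manual check once the server is running (token ids come from the
+# TOKENS table above):
+#
+#   curl -i -H 'X-Subject-Token: admin-token-1' http://localhost:5000/v3/auth/tokens
+#   curl http://localhost:5000/stats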
+def main():
+ logging.basicConfig(level=logging.DEBUG)
+ logging.info('Starting keystone-fake-server')
+ server = HTTPServer(('localhost', 5000), HTTPRequestHandler)
+ server.serve_forever()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/qa/workunits/rgw/keystone-service-token.sh b/qa/workunits/rgw/keystone-service-token.sh
new file mode 100755
index 000000000..fc39731ca
--- /dev/null
+++ b/qa/workunits/rgw/keystone-service-token.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+trap cleanup EXIT
+
+function cleanup() {
+ kill $KEYSTONE_FAKE_SERVER_PID
+ wait
+}
+
+function run() {
+ $CEPH_ROOT/qa/workunits/rgw/keystone-fake-server.py &
+ KEYSTONE_FAKE_SERVER_PID=$!
+ # Give fake Keystone server some seconds to startup
+ sleep 5
+ $CEPH_ROOT/qa/workunits/rgw/test-keystone-service-token.py
+}
+
+main keystone-service-token "$@"
diff --git a/qa/workunits/rgw/olh_noname_key b/qa/workunits/rgw/olh_noname_key
new file mode 100644
index 000000000..6138c57cd
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_key
@@ -0,0 +1 @@
+€1001_04/57/0457f727ec113e418d5b16d206b200ed068c0533554883ce811df7c932a3df68/2018_12_11/2889999/3386469/metadata.gz \ No newline at end of file
diff --git a/qa/workunits/rgw/olh_noname_val b/qa/workunits/rgw/olh_noname_val
new file mode 100644
index 000000000..ff442e137
--- /dev/null
+++ b/qa/workunits/rgw/olh_noname_val
Binary files differ
diff --git a/qa/workunits/rgw/run-bucket-check.sh b/qa/workunits/rgw/run-bucket-check.sh
new file mode 100755
index 000000000..85e02db5e
--- /dev/null
+++ b/qa/workunits/rgw/run-bucket-check.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost::443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_bucket_check.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-datacache.sh b/qa/workunits/rgw/run-datacache.sh
new file mode 100755
index 000000000..5c00da1da
--- /dev/null
+++ b/qa/workunits/rgw/run-datacache.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost::443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install configobj
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_datacache.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-reshard.sh b/qa/workunits/rgw/run-reshard.sh
new file mode 100755
index 000000000..bdab0aabb
--- /dev/null
+++ b/qa/workunits/rgw/run-reshard.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+set -ex
+
+# this test uses fault injection to abort during 'radosgw-admin bucket reshard'
+# disable coredumps so teuthology won't mark a failure
+ulimit -c 0
+
+#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost::443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_reshard.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/run-s3tests.sh b/qa/workunits/rgw/run-s3tests.sh
new file mode 100755
index 000000000..727bef9eb
--- /dev/null
+++ b/qa/workunits/rgw/run-s3tests.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -ex
+
+# run s3-tests from current directory. assume working
+# ceph environment (radosgw-admin in path) and rgw on localhost:8000
+# (the vstart default).
+
+branch=$1
+[ -z "$1" ] && branch=master
+port=$2
+[ -z "$2" ] && port=8000 # this is vstart's default
+
+##
+
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+
+if [ -e CMakeCache.txt ]; then
+ BIN_PATH=$PWD/bin
+elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
+ cd $root_path/../${BUILD_DIR}
+ BIN_PATH=$PWD/bin
+fi
+PATH=$PATH:$BIN_PATH
+
+dir=tmp.s3-tests.$$
+
+# clone and bootstrap
+mkdir $dir
+cd $dir
+git clone https://github.com/ceph/s3-tests
+cd s3-tests
+git checkout ceph-$branch
+S3TEST_CONF=s3tests.conf.SAMPLE tox -- -m "not fails_on_rgw and not sse_s3 and not lifecycle_expiration and not test_of_sts and not webidentity_test" -v
+
+cd ../..
+rm -rf $dir
+
+echo OK.
+
diff --git a/qa/workunits/rgw/run-versioning.sh b/qa/workunits/rgw/run-versioning.sh
new file mode 100755
index 000000000..df60b7b03
--- /dev/null
+++ b/qa/workunits/rgw/run-versioning.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
+# localhost::443 for ssl
+
+mydir=`dirname $0`
+
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+## run test
+$mydir/bin/python3 $mydir/test_rgw_versioning.py
+
+deactivate
+echo OK.
+
diff --git a/qa/workunits/rgw/s3_bucket_quota.pl b/qa/workunits/rgw/s3_bucket_quota.pl
new file mode 100755
index 000000000..7f5476ef6
--- /dev/null
+++ b/qa/workunits/rgw/s3_bucket_quota.pl
@@ -0,0 +1,393 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_bucket_quota.pl [--help]
+
+Examples:
+ perl s3_bucket_quota.pl
+ or
+ perl s3_bucket_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test the rgw bucket quota functionality using the S3
+interface and reports the test results
+
+=head1 ARGUMENTS
+
+s3_bucket_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+#use strict;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+my $rgw_user = "qa_user";
+
+# Function that deletes the user $rgw_user and write to logfile.
+sub delete_user
+{
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /aborting/){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
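+# The quota subs below capture radosgw-admin's output and treat an empty
+# capture as success ($set_quota !~ /./ is true only when no output was
+# produced); the script assumes 'radosgw-admin quota set' prints nothing on
+# success.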
+sub quota_set_max_size {
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_size_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size as zero\n");
+ } else {
+ fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_max_objs_zero {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max objects as zero\n");
+ } else {
+ fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_size {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname with max size -1\n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
+ }
+ delete_bucket();
+}
+
+sub quota_set_neg_objs {
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
+ if ($set_quota !~ /./){
+ pass ("quota set for the bucket: $bucketname max objects -1 \n");
+ } else {
+ fail ("quota set failed for the bucket: $bucketname \n with max objects -1");
+ }
+ delete_bucket();
+}
+
+sub quota_set_user_objs {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max_objs set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max_objs set failed for the given user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_user_size {
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
+ my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
+ if ($set_quota1 !~ /./){
+ print "bucket quota max size set for the given user: $bucketname \n";
+ } else {
+ print "bucket quota max size set failed for the user: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_set_max_obj {
+ # set max objects
+ my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
+ if ($set_quota !~ /./){
+ print "quota set for the bucket: $bucketname \n";
+ } else {
+ print "quota set failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_enable {
+ my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the bucket: $bucketname \n";
+ } else {
+ print "quota enable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable {
+ my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the bucket: $bucketname \n";
+ } else {
+ print "quota disable failed for the bucket: $bucketname \n";
+ exit 1;
+ }
+ return 0;
+}
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ #($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# set bucket quota with max_objects and verify
+sub test_max_objects {
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects passed" );
+ } else {
+ fail ( "Test max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# Set bucket quota for specific user and ensure max objects set for the user is validated
+sub test_max_objects_per_user{
+ my $size = '10Mb';
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0){
+ pass ( "Test max objects for the given user passed" );
+ } else {
+ fail ( "Test max objects for the given user failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_obj();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
+sub test_beyond_max_objs_user {
+ my $size = "10Mb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_objs();
+ quota_enable();
+ upload_file();
+ my $ret_value = readd_file();
+ if ($ret_value == 1){
+ pass ( "set max objects for a given user and test beyond max objects passed" );
+ } else {
+ fail ( "set max objects for a given user and test beyond max objects failed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size and ensure it is validated
+sub test_quota_size {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size and ensure that objects upload beyond max size is not entertained" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for max size for a given user and ensure it is validated
+sub test_quota_size_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_enable();
+ my $ret_value = upload_file();
+ if ($ret_value == 1) {
+ pass ( "set max size for a given user and ensure that objects upload beyond max size is not entertained" );
+ my $retdel = delete_keys($mytestfilename);
+ if ($retdel == 0) {
+ print "delete objects successful \n";
+ my $size1 = "1Gb";
+ create_file($size1);
+ my $ret_val1 = upload_file();
+ if ($ret_val1 == 0) {
+ pass ( "set max size for a given user and ensure that the max size is in effect" );
+ } else {
+ fail ( "set max size for a given user and ensure the max size takes effect" );
+ }
+ }
+ } else {
+ fail ( "set max size for a given user and ensure that objects beyond max size is not allowed" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size but disable quota and verify
+sub test_quota_size_disabled {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_max_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size doesnt take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size doesnt take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota size for a given user but disable quota and verify
+sub test_quota_size_disabled_user {
+ my $ret_value;
+ my $size = "2Gb";
+ create_file($size);
+ run_s3($rgw_user);
+ quota_set_user_size();
+ quota_disable();
+ my $ret_value = upload_file();
+ if ($ret_value == 0) {
+ pass ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
+ } else {
+ fail ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
+ }
+ delete_user();
+ delete_keys($mytestfilename);
+ delete_bucket();
+}
+
+# set bucket quota for specified user and verify
+
+#== Main starts here===
+ceph_os_info();
+test_max_objects();
+test_max_objects_per_user();
+test_beyond_max_objs();
+test_beyond_max_objs_user();
+quota_set_max_size_zero();
+quota_set_max_objs_zero();
+quota_set_neg_objs();
+quota_set_neg_size();
+test_quota_size();
+test_quota_size_user();
+test_quota_size_disabled();
+test_quota_size_disabled_user();
+
+print "OK";
diff --git a/qa/workunits/rgw/s3_multipart_upload.pl b/qa/workunits/rgw/s3_multipart_upload.pl
new file mode 100755
index 000000000..ab29e6b03
--- /dev/null
+++ b/qa/workunits/rgw/s3_multipart_upload.pl
@@ -0,0 +1,151 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_multipart_upload.pl [--help]
+
+Examples:
+ perl s3_multipart_upload.pl
+ or
+ perl s3_multipart_upload.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test rgw multipart upload followed by a download and
+checksum verification using the S3 interface, and reports the test results
+
+=head1 ARGUMENTS
+
+s3_multipart_upload.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $mytestfilename;
+
+# upload a file to the bucket
+sub upload_file {
+ my ($fsize, $i) = @_;
+ create_file($fsize, $i);
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+# Function to perform multipart upload of given file size to the user bucket via s3 interface
+sub multipart_upload
+{
+ my ($size, $parts) = @_;
+ # generate random user every time
+ my $user = rand();
+ # Divide the file size into equal parts and upload them to the bucket in multiple parts
+ my $fsize = ($size/$parts);
+ my $fsize1;
+ run_s3($user);
+ if ($parts == 10){
+ $fsize1 = '100Mb';
+ } elsif ($parts == 100){
+ $fsize1 = '10Mb';
+ }
+ foreach my $i(1..$parts){
+ print "uploading file - part $i \n";
+ upload_file($fsize1, $i);
+ }
+ fetch_file_from_bucket($fsize1, $parts);
+ compare_cksum($fsize1, $parts);
+ purge_data($user);
+}
+
+# Function to download the files from bucket to verify there is no data corruption
+sub fetch_file_from_bucket
+{
+ # fetch file from the bucket
+ my ($fsize, $parts) = @_;
+ foreach my $i(1..$parts){
+ my $src_file = "$fsize.$i";
+ my $dest_file = "/tmp/downloadfile.$i";
+ print
+ "Downloading $src_file from bucket to $dest_file \n";
+ $response =
+ $bucket->get_key_filename( $src_file, GET,
+ $dest_file )
+ or die $s3->err . ": " . $s3->errstr;
+ }
+}
+
+# Compare the source file with destination file and verify checksum to ensure
+# the files are not corrupted
+sub compare_cksum
+{
+ my ($fsize, $parts)=@_;
+ my $md5 = Digest::MD5->new;
+ my $flag = 0;
+ foreach my $i (1..$parts){
+ my $src_file = "/tmp/"."$fsize".".$i";
+ my $dest_file = "/tmp/downloadfile".".$i";
+ open( FILE, $src_file )
+ or die "Error: Could not open $src_file for MD5 checksum...";
+ open( DLFILE, $dest_file )
+ or die "Error: Could not open $dest_file for MD5 checksum.";
+ binmode(FILE);
+ binmode(DLFILE);
+ my $md5sum = $md5->addfile(*FILE)->hexdigest;
+ my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
+ close FILE;
+ close DLFILE;
+ # compare the checksums
+ if ( $md5sum eq $md5sumdl ) {
+ $flag++;
+ }
+ }
+ if ($flag == $parts){
+ pass("checksum verification for multipart upload passed" );
+ }else{
+ fail("checksum verification for multipart upload failed" );
+ }
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+# The following test runs multi part upload of file size 1Gb in 10 parts
+multipart_upload('1048576000', 10);
+# The following test runs multipart upload of 1 Gb file in 100 parts
+multipart_upload('1048576000', 100);
+print "OK";
diff --git a/qa/workunits/rgw/s3_user_quota.pl b/qa/workunits/rgw/s3_user_quota.pl
new file mode 100755
index 000000000..6d5c02a9a
--- /dev/null
+++ b/qa/workunits/rgw/s3_user_quota.pl
@@ -0,0 +1,191 @@
+#! /usr/bin/perl
+
+=head1 NAME
+
+s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
+
+=head1 SYNOPSIS
+
+Use:
+ perl s3_user_quota.pl [--help]
+
+Examples:
+ perl s3_user_quota.pl
+ or
+ perl s3_user_quota.pl --help
+
+=head1 DESCRIPTION
+
+This script intends to test the rgw user quota functionality using the S3
+interface and reports the test results
+
+=head1 ARGUMENTS
+
+s3_user_quota.pl takes the following arguments:
+ --help
+ (optional) Displays the usage message.
+
+=cut
+
+use Amazon::S3;
+use Data::Dumper;
+use IO::File;
+use Getopt::Long;
+use Digest::MD5;
+use Pod::Usage();
+use FindBin;
+use lib $FindBin::Bin;
+use s3_utilities;
+use Net::Domain qw(hostfqdn);
+
+my $help;
+
+Getopt::Long::GetOptions(
+ 'help' => \$help
+);
+Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
+
+#== local variables ===
+our $mytestfilename;
+my $mytestfilename1;
+my $logmsg;
+my $kruft;
+my $s3;
+my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
+my $port = $ENV{RGW_PORT}||80;
+our $hostname = "$hostdom:$port";
+our $testfileloc;
+our $cnt;
+
+sub quota_set_max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./)&&($maxsize == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+ return 0;
+}
+
+sub max_size_per_user {
+ my ($maxsize, $size1,$rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
+ if (($set_quota !~ /./) && ($maxsize != 0)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ $cnt++;
+ }
+ }
+ return $cnt;
+}
+
+sub quota_set_max_obj_per_user {
+ # set max objects
+ my ($maxobjs, $size1, $rgw_user) = @_;
+ run_s3($rgw_user);
+ my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
+ if (($set_quota !~ /./) && ($maxobjs == 0)){
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 1){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ } elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
+ my $ret = test_max_objs($size1, $rgw_user);
+ if ($ret == 0){
+ pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
+ }else {
+ fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
+ }
+ }
+ delete_keys($mytestfilename);
+ purge_data($rgw_user);
+}
+
+sub quota_enable_user {
+ my ($rgw_user) = @_;
+ my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
+ if ($en_quota !~ /./){
+ print "quota enabled for the user $rgw_user \n";
+ } else {
+ print "quota enable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
+
+sub quota_disable_user {
+    my ($rgw_user) = @_;
+    my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
+ if ($dis_quota !~ /./){
+ print "quota disabled for the user $rgw_user \n";
+ } else {
+ print "quota disable failed for the user $rgw_user \n";
+ exit 1;
+ }
+ return 0;
+}
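+
+# For reference, the admin commands issued by the helpers above look like:
+#   radosgw-admin quota set --uid=<user> --quota-scope=user --max-objects=<n>
+#   radosgw-admin quota enable --uid=<user> --quota-scope=user
+#   radosgw-admin quota disable --uid=<user> --quota-scope=user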
+
+# upload a file to the bucket
+sub upload_file {
+ print "adding file to bucket $bucketname: $mytestfilename\n";
+ ($bucket->add_key_filename( $mytestfilename, $testfileloc,
+ { content_type => 'text/plain', },
+ ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
+}
+
+# delete the bucket
+sub delete_bucket {
+ ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
+}
+
+#Function to upload the given file size to bucket and verify
+sub test_max_objs {
+ my ($size, $rgw_user) = @_;
+ create_file($size);
+ quota_enable_user($rgw_user);
+ my $ret_value = upload_file();
+ return $ret_value;
+}
+
+# set user quota and ensure it is validated
+sub test_user_quota_max_size{
+    my ($max_buckets,$size, $fsize) = @_;
+    my $usr = rand();
+    # declared outside the loop so the final check sees the last result
+    my $ret_value;
+    foreach my $i (1..$max_buckets){
+        $ret_value = max_size_per_user($size, $fsize, $usr );
+    }
+    if ($ret_value == $max_buckets){
+ fail( "user quota max size for $usr failed on $max_buckets buckets" );
+ } else {
+ pass( "user quota max size for $usr passed on $max_buckets buckets" );
+ }
+ delete_keys($mytestfilename);
+ purge_data($usr);
+}
+
+#== Main starts here===
+ceph_os_info();
+check();
+quota_set_max_obj_per_user('0', '10Mb', 'usr1');
+quota_set_max_obj_per_user('1', '10Mb', 'usr2');
+quota_set_max_size_per_user(0, '10Mb', 'usr1');
+quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
+test_user_quota_max_size(3,1048576000,'100Mb');
+test_user_quota_max_size(2,1048576000, '1Gb');
+print "OK";
diff --git a/qa/workunits/rgw/s3_utilities.pm b/qa/workunits/rgw/s3_utilities.pm
new file mode 100644
index 000000000..3c3fae900
--- /dev/null
+++ b/qa/workunits/rgw/s3_utilities.pm
@@ -0,0 +1,233 @@
+# Common subroutines shared by the s3 testing code
+my $sec;
+my $min;
+my $hour;
+my $mon;
+my $year;
+my $mday;
+my $wday;
+my $yday;
+my $isdst;
+my $PASS_CNT = 0;
+my $FAIL_CNT = 0;
+
+our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
+
+# function to get the current time stamp from the test set up
+sub get_timestamp {
+   ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+   $mon += 1;     # localtime() months are 0-based
+   $year += 1900; # localtime() years count from 1900
+   if ($mon < 10) { $mon = "0$mon"; }
+   if ($mday < 10) { $mday = "0$mday"; }
+   if ($hour < 10) { $hour = "0$hour"; }
+   if ($min < 10) { $min = "0$min"; }
+   if ($sec < 10) { $sec = "0$sec"; }
+   return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
+}
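+
+# Equivalent sketch using the POSIX core module (for comparison only, not
+# used by the tests):
+#   use POSIX qw(strftime);
+#   my $stamp = strftime('%Y_%m_%d_%H_%M_%S', localtime);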
+
+# Function to check if radosgw is already running
+sub get_status {
+ my $service = "radosgw";
+ my $cmd = "pgrep $service";
+ my $status = get_cmd_op($cmd);
+ if ($status =~ /\d+/ ){
+ return 0;
+ }
+ return 1;
+}
+
+# function to execute the command and return output
+sub get_cmd_op
+{
+ my $cmd = shift;
+ my $excmd = `$cmd`;
+ return $excmd;
+}
+
+#Function that executes the CLI commands and returns the output of the command
+sub get_command_output {
+ my $cmd_output = shift;
+ open( FH, ">>$test_log" );
+ print FH "\"$cmd_output\"\n";
+ my $exec_cmd = `$cmd_output 2>&1`;
+ print FH "$exec_cmd\n";
+ close(FH);
+ return $exec_cmd;
+}
+
+# Function to get the hostname
+sub get_hostname
+{
+ my $cmd = "hostname";
+ my $get_host = get_command_output($cmd);
+ chomp($get_host);
+ return($get_host);
+}
+
+sub pass {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT PASSED - $comment \n";
+ print_border2();
+ $PASS_CNT++;
+}
+
+sub fail {
+ my ($comment) = @_;
+ print "Comment required." unless length $comment;
+ chomp $comment;
+ print_border2();
+ print "Test case: $TC_CNT FAILED - $comment \n";
+ print_border2();
+ $FAIL_CNT++;
+}
+
+sub print_border2 {
+ print "~" x 90 . "\n";
+}
+
+# Function to create the user "qa_user" and extract the user access_key and secret_key of the user
+sub get_user_info
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /keys/){
+ return (0,0);
+ }
+ my @get_user = (split/\n/,$cmd_op);
+ foreach (@get_user) {
+ if ($_ =~ /access_key/ ){
+ $get_acc_key = $_;
+ } elsif ($_ =~ /secret_key/ ){
+ $get_sec_key = $_;
+ }
+ }
+ my $access_key = $get_acc_key;
+ my $acc_key = (split /:/, $access_key)[1];
+ $acc_key =~ s/\\//g;
+ $acc_key =~ s/ //g;
+ $acc_key =~ s/"//g;
+ $acc_key =~ s/,//g;
+ my $secret_key = $get_sec_key;
+ my $sec_key = (split /:/, $secret_key)[1];
+ $sec_key =~ s/\\//g;
+ $sec_key =~ s/ //g;
+ $sec_key =~ s/"//g;
+ $sec_key =~ s/,//g;
+ return ($acc_key, $sec_key);
+}
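+
+# A sturdier alternative (sketch, assuming the JSON::PP core module and that
+# `radosgw-admin user create` emits JSON with a "keys" array):
+#   use JSON::PP;
+#   my $info = decode_json($cmd_op);
+#   my ($acc_key, $sec_key) = ($info->{keys}[0]{access_key},
+#                              $info->{keys}[0]{secret_key});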
+
+# Function that deletes the given user and all associated user data
+sub purge_data
+{
+ my ($rgw_user) = @_;
+ my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
+ my $cmd_op = get_command_output($cmd);
+ if ($cmd_op !~ /./){
+ print "user $rgw_user deleted\n";
+ } else {
+ print "user $rgw_user NOT deleted\n";
+ return 1;
+ }
+ return 0;
+}
+
+# Read PRETTY_NAME from /etc/os-release
+sub os_pretty_name
+{
+ open(FH, '<', '/etc/os-release') or die $!;
+ while (my $line = <FH>) {
+ chomp $line;
+ if ($line =~ /^\s*PRETTY_NAME=\"?([^"]*)\"?/) {
+ return $1;
+ }
+ }
+ close(FH);
+}
+
+
+# Function to get the Ceph and distro info
+sub ceph_os_info
+{
+ my $ceph_v = get_command_output ( "ceph -v" );
+ my @ceph_arr = split(" ",$ceph_v);
+ $ceph_v = "Ceph Version: $ceph_arr[2]";
+ my $os_distro = os_pretty_name();
+ $os_distro = "Linux Flavor:$os_distro";
+ return ($ceph_v, $os_distro);
+}
+
+# Execute the test case based on the input to the script
+sub create_file {
+ my ($file_size, $part) = @_;
+ my $cnt;
+ $mytestfilename = "$file_size.$part";
+ $testfileloc = "/tmp/".$mytestfilename;
+ if ($file_size == '10Mb'){
+ $cnt = 1;
+ } elsif ($file_size == '100Mb'){
+ $cnt = 10;
+ } elsif ($file_size == '500Mb'){
+ $cnt = 50;
+ } elsif ($file_size == '1Gb'){
+ $cnt = 100;
+ } elsif ($file_size == '2Gb'){
+ $cnt = 200;
+ }
+ my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
+ if ($ret) { exit 1 };
+ return 0;
+}
+
+sub run_s3
+{
+# Run tests for the S3 functionality
+ # Modify access key and secret key to suit the user account
+ my ($user) = @_;
+ our ( $access_key, $secret_key ) = get_user_info($user);
+ if ( ($access_key) && ($secret_key) ) {
+ $s3 = Amazon::S3->new(
+ {
+ aws_access_key_id => $access_key,
+ aws_secret_access_key => $secret_key,
+ host => $hostname,
+ secure => 0,
+ retry => 1,
+ }
+ );
+ }
+
+our $bucketname = 'buck_'.get_timestamp();
+# create a new bucket (the test bucket)
+our $bucket = $s3->add_bucket( { bucket => $bucketname } )
+ or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
+ print "Bucket Created: $bucketname \n";
+ return 0;
+}
+
+# delete keys
+sub delete_keys {
+ (($bucket->delete_key($_[0])) and return 0) or return 1;
+}
+
+# Re-upload a freshly generated 10MB file to the bucket
+sub readd_file {
+    system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
+    $mytestfilename1 = '10MBfile1';
+    print "re-adding file to bucket: $mytestfilename1\n";
+    # upload the file just created above, not the stale $testfileloc path
+    ((($bucket->add_key_filename( $mytestfilename1, "/tmp/$mytestfilename1",
+        { content_type => 'text/plain', },
+    )) and (print "re-adding file succeeded\n") and return 0) or (return 1));
+}
+
+# check if rgw service is already running
+sub check
+{
+ my $state = get_status();
+ if ($state) {
+ exit 1;
+ }
+}
+1;
diff --git a/qa/workunits/rgw/test-keystone-service-token.py b/qa/workunits/rgw/test-keystone-service-token.py
new file mode 100755
index 000000000..2c7f21e93
--- /dev/null
+++ b/qa/workunits/rgw/test-keystone-service-token.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Binero
+#
+# Author: Tobias Urdin <tobias.urdin@binero.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+
+import sys
+import requests
+import time
+
+
+# b4221c214dd64ee6a464g2153fae3813 is the ID of the deadbeef project
+SWIFT_URL = 'http://localhost:8000/swift/v1/AUTH_b4221c214dd64ee6a464g2153fae3813'
+KEYSTONE_URL = 'http://localhost:5000'
+
+
+def get_stats():
+ stats_url = '%s/stats' % KEYSTONE_URL
+ return requests.get(stats_url)
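+
+# The fake Keystone server's /stats endpoint is assumed to return JSON of the
+# form {"post_total": <count>, "get_total": <count>}; the assertions in the
+# tests below rely on exactly these two counters.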
+
+
+def test_list_containers():
+ # Loop five list container requests with same token
+ for i in range(0, 5):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 1:
+ print('FAILED, post_total stat is %d not 1' % stats['post_total'])
+ sys.exit(1)
+
+ # Verify user token was cached
+ if stats['get_total'] != 1:
+ print('FAILED, get_total stat is %d not 1' % stats['get_total'])
+ sys.exit(1)
+
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+ if stats['get_total'] != 2:
+ print('FAILED, get_total stat is %d not 2' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token():
+ # Try listing containers with an expired token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+    # Verify we reached the fake Keystone server, since expired tokens are not cached
+ if stats['get_total'] != 5:
+ print('FAILED, get_total stat is %d not 5' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token_with_service_token():
+ # Try listing containers with an expired token but with a service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ # Verify admin token was cached
+ if stats['post_total'] != 2:
+ print('FAILED, post_total stat is %d not 2' % stats['post_total'])
+ sys.exit(1)
+
+    # Verify we reached the fake Keystone server, since expired tokens are not cached
+ if stats['get_total'] != 7:
+ print('FAILED, get_total stat is %d not 7' % stats['get_total'])
+ sys.exit(1)
+
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Get stats from fake Keystone server
+ r = get_stats()
+ if r.status_code != 200:
+ print('FAILED, status code is %d not 200' % r.status_code)
+ sys.exit(1)
+ stats = r.json()
+
+ if stats['post_total'] != 3:
+ print('FAILED, post_total stat is %d not 3' % stats['post_total'])
+ sys.exit(1)
+
+ if stats['get_total'] != 9:
+ print('FAILED, get_total stat is %d not 9' % stats['get_total'])
+ sys.exit(1)
+
+
+def test_expired_token_with_invalid_service_token():
+ print('Wait for cache to be invalid')
+ time.sleep(11)
+
+ # Test with a token that doesn't have allowed role as service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+ # Make sure we get user-token-1 cached
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
+ if r.status_code != 204:
+ print('FAILED, status code is %d not 204' % r.status_code)
+ sys.exit(1)
+
+ # Test that a cached token (that is invalid as service token) cannot be used as service token
+ for i in range(0, 3):
+ r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
+ if r.status_code != 401:
+ print('FAILED, status code is %d not 401' % r.status_code)
+ sys.exit(1)
+
+
+def main():
+ test_list_containers()
+ test_expired_token()
+ test_expired_token_with_service_token()
+ test_expired_token_with_invalid_service_token()
+
+
+if __name__ == '__main__':
+ main()
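+
+# Note: the repeated status-code checks above could be collapsed into a small
+# helper; a sketch (expect_status is a hypothetical name, not part of this
+# test):
+#
+#   def expect_status(resp, code):
+#       if resp.status_code != code:
+#           print('FAILED, status code is %d not %d' % (resp.status_code, code))
+#           sys.exit(1)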
diff --git a/qa/workunits/rgw/test_librgw_file.sh b/qa/workunits/rgw/test_librgw_file.sh
new file mode 100755
index 000000000..1371ff711
--- /dev/null
+++ b/qa/workunits/rgw/test_librgw_file.sh
@@ -0,0 +1,59 @@
+#!/bin/sh -e
+
+
+if [ -z "${AWS_ACCESS_KEY_ID}" ]
+then
+ export AWS_ACCESS_KEY_ID=`openssl rand -base64 20`
+ export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 40`
+
+ radosgw-admin user create --uid ceph-test-librgw-file \
+ --access-key $AWS_ACCESS_KEY_ID \
+ --secret $AWS_SECRET_ACCESS_KEY \
+ --display-name "librgw test user" \
+ --email librgw@example.com || echo "librgw user exists"
+
+ # keyring override for teuthology env
+ KEYRING="/etc/ceph/ceph.keyring"
+ K="-k ${KEYRING}"
+fi
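+
+# Example: run against a pre-existing user by exporting credentials first,
+# which skips the user-creation branch above:
+#   AWS_ACCESS_KEY_ID=<key> AWS_SECRET_ACCESS_KEY=<secret> ./test_librgw_file.sh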
+
+# nfsns is the main suite
+
+# create hierarchy, and then list it
+echo "phase 1.1"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
+
+# the older librgw_file can consume the namespace
+echo "phase 1.2"
+ceph_test_librgw_file_nfsns ${K} --getattr --verbose
+
+# and delete the hierarchy
+echo "phase 1.3"
+ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --delete --verbose
+
+# bulk create/delete buckets
+echo "phase 2.1"
+ceph_test_librgw_file_cd ${K} --create --multi --verbose
+echo "phase 2.2"
+ceph_test_librgw_file_cd ${K} --delete --multi --verbose
+
+# write continuation test
+echo "phase 3.1"
+ceph_test_librgw_file_aw ${K} --create --large --verify
+echo "phase 3.2"
+ceph_test_librgw_file_aw ${K} --delete --large
+
+# continued readdir
+echo "phase 4.1"
+ceph_test_librgw_file_marker ${K} --create --marker1 --marker2 --nobjs=100 --verbose
+echo "phase 4.2"
+ceph_test_librgw_file_marker ${K} --delete --verbose
+
+# advanced i/o (readv/writev skipped for now); split the delete op from the
+# create and stat ops to avoid a fault in the sysobject cache
+echo "phase 5.1"
+ceph_test_librgw_file_gp ${K} --get --stat --put --create
+echo "phase 5.2"
+ceph_test_librgw_file_gp ${K} --delete
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_bucket_check.py b/qa/workunits/rgw/test_rgw_bucket_check.py
new file mode 100755
index 000000000..bfa6d65d6
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_bucket_check.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+
+import logging as log
+import json
+import botocore
+from common import exec_cmd, create_user, boto_connect, put_objects, create_unlinked_objects
+from botocore.config import Config
+
+"""
+Tests behavior of radosgw-admin bucket check commands.
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+""" Constants """
+USER = 'check-tester'
+DISPLAY_NAME = 'Check Testing'
+ACCESS_KEY = 'OJODXSLNX4LUNHQG99PA'
+SECRET_KEY = '3l6ffld34qaymfomuh832j94738aie2x4p2o8h6n'
+BUCKET_NAME = 'check-bucket'
+
+def main():
+ """
+ execute bucket check commands
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY, Config(retries = {
+ 'total_max_attempts': 1,
+ }))
+
+ # pre-test cleanup
+ try:
+ bucket = connection.Bucket(BUCKET_NAME)
+ bucket.objects.all().delete()
+ bucket.object_versions.all().delete()
+ bucket.delete()
+ except botocore.exceptions.ClientError as e:
+ if not e.response['Error']['Code'] == 'NoSuchBucket':
+ raise
+
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+
+ null_version_keys = ['a', 'z']
+ null_version_objs = put_objects(bucket, null_version_keys)
+
+ connection.BucketVersioning(BUCKET_NAME).enable()
+
+ ok_keys = ['a', 'b', 'c', 'd']
+ unlinked_keys = ['c', 'd', 'e', 'f']
+ ok_objs = put_objects(bucket, ok_keys)
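+    # put_objects and create_unlinked_objects (imported from common) are
+    # assumed to return lists of (key, version_id) tuples; that is the shape
+    # the listing checks below consume.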
+
+ # TESTCASE 'recalculated bucket check stats are correct'
+ log.debug('TEST: recalculated bucket check stats are correct\n')
+ exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}')
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['num_objects'] == 6
+
+ # TESTCASE 'bucket check unlinked does not report normal entries'
+ log.debug('TEST: bucket check unlinked does not report normal entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys)
+
+ # TESTCASE 'bucket check unlinked finds unlistable entries'
+ log.debug('TEST: bucket check unlinked finds unlistable entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+
+ # TESTCASE 'unlinked entries are not listable'
+ log.debug('TEST: unlinked entries are not listable\n')
+ for ov in bucket.object_versions.all():
+ assert (ov.key, ov.version_id) not in unlinked_objs, f'object "{ov.key}:{ov.version_id}" was found in bucket listing'
+
+ # TESTCASE 'GET returns 404 for unlinked entry keys that have no other versions'
+ log.debug('TEST: GET returns 404 for unlinked entry keys that have no other versions\n')
+ noent_keys = set(unlinked_keys) - set(ok_keys)
+ for key in noent_keys:
+ try:
+ bucket.Object(key).get()
+            assert False, f'GET did not return 404 for key={key} with no prior successful PUT'
+ except botocore.exceptions.ClientError as e:
+ assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404
+
+ # TESTCASE 'bucket check unlinked fixes unlistable entries'
+ log.debug('TEST: bucket check unlinked fixes unlistable entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ for o in unlinked_objs:
+ try:
+ connection.ObjectVersion(bucket.name, o[0], o[1]).head()
+ assert False, f'head for unlistable object {o[0]}:{o[1]} succeeded after fix'
+ except botocore.exceptions.ClientError as e:
+ assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404
+
+ # TESTCASE 'bucket check unlinked fix does not affect normal entries'
+    log.debug('TEST: bucket check unlinked fix does not affect normal entries\n')
+ all_listable = list(bucket.object_versions.all())
+ assert len(all_listable) == len(ok_keys) + len(null_version_keys), 'some normal objects were not accounted for in object listing after unlinked fix'
+ for o in ok_objs:
+ assert o in map(lambda x: (x.key, x.version_id), all_listable), "normal object not listable after fix"
+ connection.ObjectVersion(bucket.name, o[0], o[1]).head()
+
+ # TESTCASE 'bucket check unlinked does not find new unlistable entries after fix'
+ log.debug('TEST: bucket check unlinked does not find new unlistable entries after fix\n')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ # for this set of keys we can produce leftover OLH object/entries by
+ # deleting the normal object instance since we should already have a leftover
+ # pending xattr on the OLH object due to the errors associated with the
+ # prior unlinked entries that were created for the same keys
+ leftover_pending_xattr_keys = set(ok_keys).intersection(unlinked_keys)
+ objs_to_delete = filter(lambda x: x[0] in leftover_pending_xattr_keys, ok_objs)
+
+ for o in objs_to_delete:
+ connection.ObjectVersion(bucket.name, o[0], o[1]).delete()
+
+ for key in leftover_pending_xattr_keys:
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}')
+ idx_entries = json.loads(out.replace(b'\x80', b'0x80'))
+        assert len(idx_entries) > 0, f'failed to create leftover OLH entries for key {key}'
+
+ # TESTCASE 'bucket check olh finds leftover OLH entries'
+ log.debug('TEST: bucket check olh finds leftover OLH entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(leftover_pending_xattr_keys)
+
+ # TESTCASE 'bucket check olh fixes leftover OLH entries'
+ log.debug('TEST: bucket check olh fixes leftover OLH entries\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --fix --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(leftover_pending_xattr_keys)
+
+ for key in leftover_pending_xattr_keys:
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}')
+ idx_entries = json.loads(out.replace(b'\x80', b'0x80'))
+        assert len(idx_entries) == 0, f'index entries still exist for key={key} after olh fix'
+
+ # TESTCASE 'bucket check olh does not find new leftover OLH entries after fix'
+ log.debug('TEST: bucket check olh does not find new leftover OLH entries after fix\n')
+ out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == 0
+
+ # TESTCASE 'bucket check fixes do not affect null version objects'
+ log.debug('TEST: verify that bucket check fixes do not affect null version objects\n')
+ for o in null_version_objs:
+ connection.ObjectVersion(bucket.name, o[0], 'null').head()
+
+ all_versions = list(map(lambda x: (x.key, x.version_id), bucket.object_versions.all()))
+ for key in null_version_keys:
+ assert (key, 'null') in all_versions
+
+ # TESTCASE 'bucket check stats are correct in the presence of unlinked entries'
+ log.debug('TEST: bucket check stats are correct in the presence of unlinked entries\n')
+ bucket.object_versions.all().delete()
+ null_version_objs = put_objects(bucket, null_version_keys)
+ ok_objs = put_objects(bucket, ok_keys)
+ unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys)
+ exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ bucket.object_versions.all().delete()
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['size'] == 0
+ assert json_out['usage']['rgw.main']['num_objects'] == 0
+ assert json_out['usage']['rgw.main']['size_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.object_versions.all().delete()
+ bucket.delete()
+
+main()
+log.info("Completed bucket check tests")
diff --git a/qa/workunits/rgw/test_rgw_datacache.py b/qa/workunits/rgw/test_rgw_datacache.py
new file mode 100755
index 000000000..f070ec0f1
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_datacache.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python3
+
+import logging as log
+from configobj import ConfigObj
+import subprocess
+import json
+import os
+
+"""
+Runs a test against a rgw with the data cache enabled. A client must be
+set in the config for this task. This client must be the same client
+that is in the config for the `rgw` task.
+
+In the `overrides` section, `datacache` and `datacache_path` must be configured
+for the `rgw` task and the ceph conf overrides must contain the below config
+variables in the client section.
+
+`s3cmd` must be added as an extra_package to the install task.
+
+In the `workunit` task, `- rgw/run-datacache.sh` must be set for the client that
+is in the config for the `rgw` task. The `RGW_DATACACHE_PATH` variable must be
+set in the workunit's `env` and it must match the `datacache_path` given to the
+`rgw` task in `overrides`.
+Ex:
+- install:
+ extra_packages:
+ deb: ['s3cmd']
+ rpm: ['s3cmd']
+- overrides:
+ rgw:
+ datacache: true
+ datacache_path: /tmp/rgw_datacache
+ install:
+ extra_packages:
+ deb: ['s3cmd']
+ rpm: ['s3cmd']
+ ceph:
+ conf:
+ client:
+ rgw d3n l1 datacache persistent path: /tmp/rgw_datacache/
+ rgw d3n l1 datacache size: 10737417240
+ rgw d3n l1 local datacache enabled: true
+ rgw enable ops log: true
+- rgw:
+ client.0:
+- workunit:
+ clients:
+ client.0:
+ - rgw/run-datacache.sh
+ env:
+ RGW_DATACACHE_PATH: /tmp/rgw_datacache
+ cleanup: true
+"""
+
+log.basicConfig(level=log.DEBUG)
+
+""" Constants """
+USER = 'rgw_datacache_user'
+DISPLAY_NAME = 'DatacacheUser'
+ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
+SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
+BUCKET_NAME = 'datacachebucket'
+FILE_NAME = '7M.dat'
+GET_FILE_NAME = '7M-get.dat'
+
+def exec_cmd(cmd):
+ log.debug("exec_cmd(%s)", cmd)
+ try:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ out, err = proc.communicate()
+ if proc.returncode == 0:
+ log.info('command succeeded')
+ if out is not None: log.info(out)
+ return out
+ else:
+ raise Exception("error: %s \nreturncode: %s" % (err, proc.returncode))
+ except Exception as e:
+ log.error('command failed')
+ log.error(e)
+ return False
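+
+# Usage sketch: exec_cmd returns the command's stdout as bytes on success and
+# False on failure, e.g.:
+#   out = exec_cmd('echo hello')   # -> b'hello\n'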
+
+def get_radosgw_endpoint():
+ out = exec_cmd('sudo netstat -nltp | egrep "rados|valgr"') # short for radosgw/valgrind
+ x = out.decode('utf8').split(" ")
+ port = [i for i in x if ':' in i][0].split(':')[1]
+ log.info('radosgw port: %s' % port)
+ proto = "http"
+ hostname = '127.0.0.1'
+
+ if port == '443':
+ proto = "https"
+
+ endpoint = hostname
+
+ log.info("radosgw endpoint is: %s", endpoint)
+ return endpoint, proto
+
+def create_s3cmd_config(path, proto):
+ """
+ Creates a minimal config file for s3cmd
+ """
+ log.info("Creating s3cmd config...")
+
+ use_https_config = "False"
+ log.info("proto for s3cmd config is %s", proto)
+ if proto == "https":
+ use_https_config = "True"
+
+ s3cmd_config = ConfigObj(
+ indent_type='',
+ infile={
+ 'default':
+ {
+ 'host_bucket': 'no.way.in.hell',
+ 'use_https': use_https_config,
+ },
+ }
+ )
+
+ f = open(path, 'wb')
+ s3cmd_config.write(f)
+ f.close()
+ log.info("s3cmd config written")
+
+def get_cmd_output(cmd_out):
+ out = cmd_out.decode('utf8')
+ out = out.strip('\n')
+ return out
+
+def main():
+ """
+ execute the datacache test
+ """
+ # setup for test
+ cache_dir = os.environ['RGW_DATACACHE_PATH']
+ log.debug("datacache dir from config is: %s", cache_dir)
+
+ out = exec_cmd('pwd')
+ pwd = get_cmd_output(out)
+ log.debug("pwd is: %s", pwd)
+
+ endpoint, proto = get_radosgw_endpoint()
+
+ # create 7M file to put
+ outfile = pwd + '/' + FILE_NAME
+ exec_cmd('dd if=/dev/urandom of=%s bs=1M count=7' % (outfile))
+
+ # create user
+ exec_cmd('radosgw-admin user create --uid %s --display-name %s --access-key %s --secret %s'
+ % (USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY))
+
+ # create s3cmd config
+ s3cmd_config_path = pwd + '/s3cfg'
+ create_s3cmd_config(s3cmd_config_path, proto)
+
+ # create a bucket
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s mb s3://%s'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME))
+
+ # put an object in the bucket
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s put %s s3://%s'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, outfile, BUCKET_NAME))
+
+ # get object from bucket
+ get_file_path = pwd + '/' + GET_FILE_NAME
+ exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s get s3://%s/%s %s --force'
+ % (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME, FILE_NAME, get_file_path))
+
+ # get info of object
+ out = exec_cmd('radosgw-admin object stat --bucket=%s --object=%s' % (BUCKET_NAME, FILE_NAME))
+
+ json_op = json.loads(out)
+ cached_object_name = json_op['manifest']['prefix']
+ log.debug("Cached object name is: %s", cached_object_name)
+
+    # check that the cache is enabled (i.e. that the cache directory is not empty)
+ out = exec_cmd('find %s -type f | wc -l' % (cache_dir))
+ chk_cache_dir = int(get_cmd_output(out))
+ log.debug("Check cache dir content: %s", chk_cache_dir)
+ if chk_cache_dir == 0:
+        log.info("NOTICE: datacache test object not found; check whether the datacache was bypassed or disabled during this run.")
+ return
+
+ # list the files in the cache dir for troubleshooting
+ out = exec_cmd('ls -l %s' % (cache_dir))
+ # get name of cached object and check if it exists in the cache
+ out = exec_cmd('find %s -name "*%s1"' % (cache_dir, cached_object_name))
+ cached_object_path = get_cmd_output(out)
+ log.debug("Path of file in datacache is: %s", cached_object_path)
+ out = exec_cmd('basename %s' % (cached_object_path))
+ basename_cmd_out = get_cmd_output(out)
+ log.debug("Name of file in datacache is: %s", basename_cmd_out)
+
+ # check to see if the cached object is in Ceph
+ out = exec_cmd('rados ls -p default.rgw.buckets.data')
+ rados_ls_out = get_cmd_output(out)
+ log.debug("rados ls output is: %s", rados_ls_out)
+
+ assert(basename_cmd_out in rados_ls_out)
+ log.debug("RGW Datacache test SUCCESS")
+
+ # remove datacache dir
+ #cmd = exec_cmd('rm -rf %s' % (cache_dir))
+ #log.debug("RGW Datacache dir deleted")
+    #^ commented out for future reference - the workunit keeps running tests, and if the cache_dir
+    # is removed, all subsequent writes to the cache fail with errno 2 ENOENT (no such file or directory).
+
+main()
+log.info("Completed Datacache tests")
diff --git a/qa/workunits/rgw/test_rgw_gc_log.sh b/qa/workunits/rgw/test_rgw_gc_log.sh
new file mode 100755
index 000000000..ab4015aef
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_gc_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_gc_log
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_obj.sh b/qa/workunits/rgw/test_rgw_obj.sh
new file mode 100755
index 000000000..01dd2b5ee
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_obj.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_obj
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_orphan_list.sh b/qa/workunits/rgw/test_rgw_orphan_list.sh
new file mode 100755
index 000000000..34d550cea
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_orphan_list.sh
@@ -0,0 +1,519 @@
+#!/usr/bin/env bash
+
+# set -x
+set -e
+
+# if defined, debug messages will be displayed and prepended with the string
+# debug="DEBUG"
+
+huge_size=5100 # in megabytes
+big_size=7 # in megabytes
+
+huge_obj=/tmp/huge_obj.temp.$$
+big_obj=/tmp/big_obj.temp.$$
+empty_obj=/tmp/empty_obj.temp.$$
+
+fifo=/tmp/orphan-fifo.$$
+awscli_dir=${HOME}/awscli_temp
+export PATH=${PATH}:${awscli_dir}
+
+rgw_host=$(hostname --fqdn)
+if echo "$rgw_host" | grep -q '\.' ; then
+ :
+else
+ host_domain=".front.sepia.ceph.com"
+ echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
+ rgw_host="${rgw_host}${host_domain}"
+fi
+rgw_port=80
+
+echo "Fully Qualified Domain Name: $rgw_host"
+
+success() {
+ echo OK.
+ exit 0
+}
+
+########################################################################
+# INSTALL AND CONFIGURE TOOLING
+
+install_awscli() {
+ # NB: this does verify authenticity and integrity of downloaded
+ # file; see
+ # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
+ here="$(pwd)"
+ cd "$HOME"
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ mkdir -p $awscli_dir
+ ./aws/install -i $awscli_dir
+ cd "$here"
+}
+
+uninstall_awscli() {
+ here="$(pwd)"
+ cd "$HOME"
+ rm -rf $awscli_dir ./aws awscliv2.zip
+ cd "$here"
+}
+
+sudo yum -y install s3cmd
+sudo yum -y install python3-setuptools
+sudo yum -y install python3-pip
+sudo pip3 install --upgrade setuptools
+sudo pip3 install python-swiftclient
+
+# get ready for transition from s3cmd to awscli
+if false ;then
+ install_awscli
+ aws --version
+ uninstall_awscli
+fi
+
+s3config=/tmp/s3config.$$
+
+# do not include the port when it is 80; the host base is used in the
+# v4 signature and it needs to follow this convention for signatures
+# to match
+if [ "$rgw_port" -ne 80 ] ;then
+ s3_host_base="${rgw_host}:${rgw_port}"
+else
+ s3_host_base="$rgw_host"
+fi
+
+cat >${s3config} <<EOF
+[default]
+host_base = $s3_host_base
+access_key = 0555b35654ad1656d804
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+bucket_location = us-east-1
+check_ssl_certificate = True
+check_ssl_hostname = True
+default_mime_type = binary/octet-stream
+delete_removed = False
+dry_run = False
+enable_multipart = True
+encoding = UTF-8
+encrypt = False
+follow_symlinks = False
+force = False
+guess_mime_type = True
+host_bucket = anything.with.three.dots
+multipart_chunk_size_mb = 15
+multipart_max_chunks = 10000
+recursive = False
+recv_chunk = 65536
+send_chunk = 65536
+signature_v2 = False
+socket_timeout = 300
+use_https = False
+use_mime_magic = True
+verbosity = WARNING
+EOF
+
+
+# set up swift authentication
+export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
+export ST_USER=test:tester
+export ST_KEY=testing
+
+create_users() {
+ # Create S3 user
+ local akey='0555b35654ad1656d804'
+ local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+ radosgw-admin user create --uid testid \
+ --access-key $akey --secret $skey \
+ --display-name 'M. Tester' --email tester@ceph.com
+
+ # Create Swift user
+ radosgw-admin user create --subuser=test:tester \
+ --display-name=Tester-Subuser --key-type=swift \
+ --secret=testing --access=full
+}
+
+myswift() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: swift --verbose --debug $@"
+ fi
+ swift --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+        echo "ERROR: code = $code ; command = swift --verbose --debug $@"
+ exit $code
+ fi
+}
+
+mys3cmd() {
+ if [ -n "$debug" ] ;then
+ echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
+ fi
+ s3cmd --config=${s3config} --verbose --debug "$@"
+ local code=$?
+ if [ $code -ne 0 ] ;then
+        echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug $@"
+ exit $code
+ fi
+}
+
+mys3uploadkill() {
+ if [ $# -ne 5 ] ;then
+ echo "$0: error expecting 5 arguments"
+ exit 1
+ fi
+
+ local_file="$1"
+ remote_bkt="$2"
+ remote_obj="$3"
+ fifo="$4"
+ stop_part="$5"
+
+ mkfifo $fifo
+
+ s3cmd --config=${s3config} put $local_file \
+ s3://${remote_bkt}/${remote_obj} \
+ --progress \
+ --multipart-chunk-size-mb=5 >$fifo &
+ set +e # don't allow errors to stop script
+ while read line ;do
+ echo "$line" | grep --quiet "part $stop_part "
+ if [ ${PIPESTATUS[1]} -eq 0 ] ;then
+ kill -9 $(jobs -p)
+ break
+ fi
+ done <$fifo
+ set -e
+
+ rm -f $fifo
+}
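+
+# Usage sketch: leave an incomplete multipart upload of $huge_obj in bucket
+# "bkt" by killing s3cmd once it reports part 20 (the fifo path is arbitrary):
+#   mys3uploadkill $huge_obj bkt partial-mp-obj /tmp/mp-fifo.$$ 20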
+
+mys3upload() {
+ obj=$1
+ bucket=$2
+ dest_obj=$3
+
+ mys3cmd put -q $obj s3://${bucket}/$dest_obj
+}
+
+########################################################################
+# PREP
+
+create_users
+dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
+dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
+touch $empty_obj
+
+quick_tests() {
+ echo TRY A SWIFT COMMAND
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
+ myswift list
+ myswift list swift-plain-ctr
+
+ echo TRY A RADOSGW-ADMIN COMMAND
+ radosgw-admin bucket list # make sure rgw is up and running
+}
+
+########################################################################
+# S3 TESTS
+
+####################################
+# regular multipart test
+
+mys3cmd mb s3://multipart-bkt
+mys3upload $huge_obj multipart-bkt multipart-obj
+mys3cmd ls
+mys3cmd ls s3://multipart-bkt
+
+####################################
+# multipart test with incomplete uploads
+
+bkt="incomplete-mp-bkt-1"
+
+mys3cmd mb s3://$bkt
+
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20
+
+# generate an incomplete multipart with more than 1,000 parts
+mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005
+
+# generate more than 1,000 incomplete multiparts
+for c in $(seq 1005) ;do
+ mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3
+done
+
+####################################
+# resharded bucket
+
+bkt=resharded-bkt-1
+
+mys3cmd mb s3://$bkt
+
+for f in $(seq 8) ; do
+ dest_obj="reshard-obj-${f}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+done
+
+radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
+radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
+
+####################################
+# versioned bucket
+
+if true ;then
+ echo "WARNING: versioned bucket test currently turned off"
+else
+ bkt=versioned-bkt-1
+
+ mys3cmd mb s3://$bkt
+
+ # bucket-enable-versioning $bkt
+
+ for f in $(seq 3) ;do
+ for g in $(seq 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
+ done
+ done
+
+ for g in $(seq 1 2 10) ;do
+ dest_obj="versioned-obj-${g}"
+ mys3cmd rm s3://${bkt}/$dest_obj
+ done
+fi
+
+############################################################
+# copy small objects
+
+o_bkt="orig-bkt-1"
+d_bkt="copy-bkt-1"
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+############################################################
+# copy small objects and delete original
+
+o_bkt="orig-bkt-2"
+d_bkt="copy-bkt-2"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 4) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
+mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
+
+for f in $(seq 5 6) ;do
+ dest_obj="orig-obj-$f"
+ mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://${o_bkt}
+
+############################################################
+# copy multipart objects
+
+o_bkt="orig-mp-bkt-3"
+d_bkt="copy-mp-bkt-3"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+
+############################################################
+# copy multipart objects and delete original
+
+o_bkt="orig-mp-bkt-4"
+d_bkt="copy-mp-bkt-4"
+
+mys3cmd mb s3://$o_bkt
+
+for f in $(seq 2) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
+done
+
+mys3cmd mb s3://$d_bkt
+
+mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
+ s3://${d_bkt}/copied-multipart-obj-1
+
+for f in $(seq 5 5) ;do
+ dest_obj="orig-multipart-obj-$f"
+ mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
+done
+
+mys3cmd rb --recursive s3://$o_bkt
+
+########################################################################
+# SWIFT TESTS
+
+# 600MB
+segment_size=629145600
+
+############################################################
+# plain test
+
+for f in $(seq 4) ;do
+ myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
+done
+
+############################################################
+# zero-len test
+
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
+myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
+myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
+
+############################################################
+# dlo test
+
+# upload in 600MB segments
+myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
+ -S $segment_size
+
+############################################################
+# slo test
+
+# upload in 600MB segments
+myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
+ -S $segment_size --use-slo
+
+############################################################
+# large object copy test
+
+
+o_ctr=swift-orig-ctr
+o_obj=slo-orig-obj-1
+d_ctr=swift-copy-ctr
+d_obj=slo-copy-obj-1
+myswift upload $o_ctr $big_obj --object-name $o_obj
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy test
+
+o_ctr=swift-orig-dlo-ctr-1
+o_obj=dlo-orig-dlo-obj-1
+d_ctr=swift-copy-dlo-ctr-1
+d_obj=dlo-copy-dlo-obj-1
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+############################################################
+# huge dlo object copy and orig delete
+
+o_ctr=swift-orig-dlo-ctr-2
+o_obj=dlo-orig-dlo-obj-2
+d_ctr=swift-copy-dlo-ctr-2
+d_obj=dlo-copy-dlo-obj-2
+
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size
+
+myswift copy --destination /${d_ctr}/${d_obj} \
+ $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test
+
+o_ctr=swift-orig-slo-ctr-1
+o_obj=slo-orig-slo-obj-1
+d_ctr=swift-copy-slo-ctr-1
+d_obj=slo-copy-slo-obj-1
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+############################################################
+# huge slo object copy test and orig delete
+
+o_ctr=swift-orig-slo-ctr-2
+o_obj=slo-orig-slo-obj-2
+d_ctr=swift-copy-slo-ctr-2
+d_obj=slo-copy-slo-obj-2
+myswift upload $o_ctr $huge_obj --object-name $o_obj \
+ -S $segment_size --use-slo
+
+myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
+
+myswift delete $o_ctr $o_obj
+
+########################################################################
+# FORCE GARBAGE COLLECTION
+
+sleep 6 # in the test configuration, objects become eligible for gc after 5 seconds
+radosgw-admin gc process --include-all
+
+
+########################################
+# DO ORPHAN LIST
+
+pool="default.rgw.buckets.data"
+
+rgw-orphan-list $pool
+
+# we only expect there to be one output file, but loop just in case
+ol_error=""
+for f in orphan-list-*.out ; do
+ if [ -s "$f" ] ;then # if file non-empty
+ ol_error="${ol_error}:$f"
+        echo "One or more orphans found in $f:"
+ cat "$f"
+ fi
+done
+
+if [ -n "$ol_error" ] ;then
+ echo "ERROR: orphans found when none expected"
+ exit 1
+fi
+
+########################################################################
+# CLEAN UP
+
+rm -f $empty_obj $big_obj $huge_obj $s3config
+
+success
diff --git a/qa/workunits/rgw/test_rgw_reshard.py b/qa/workunits/rgw/test_rgw_reshard.py
new file mode 100755
index 000000000..6326e7b17
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_reshard.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python3
+
+import errno
+import time
+import logging as log
+import json
+import os
+from common import exec_cmd, boto_connect, create_user, put_objects, create_unlinked_objects
+
+"""
+Rgw manual and dynamic resharding testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+""" Constants """
+USER = 'tester'
+DISPLAY_NAME = 'Testing'
+ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
+SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
+BUCKET_NAME = 'a-bucket'
+VER_BUCKET_NAME = 'myver'
+INDEX_POOL = 'default.rgw.buckets.index'
+
+class BucketStats:
+ def __init__(self, bucket_name, bucket_id, num_objs=0, size_kb=0, num_shards=0):
+ self.bucket_name = bucket_name
+ self.bucket_id = bucket_id
+ self.num_objs = num_objs
+ self.size_kb = size_kb
+ self.num_shards = num_shards if num_shards > 0 else 1
+
+ def get_num_shards(self):
+ self.num_shards = get_bucket_num_shards(self.bucket_name, self.bucket_id)
+
+
+def get_bucket_stats(bucket_name):
+ """
+ function to get bucket stats
+ """
+ cmd = exec_cmd("radosgw-admin bucket stats --bucket {}".format(bucket_name))
+ json_op = json.loads(cmd)
+ #print(json.dumps(json_op, indent = 4, sort_keys=True))
+ bucket_id = json_op['id']
+ num_shards = json_op['num_shards']
+ if len(json_op['usage']) > 0:
+ num_objects = json_op['usage']['rgw.main']['num_objects']
+ size_kb = json_op['usage']['rgw.main']['size_kb']
+ else:
+ num_objects = 0
+ size_kb = 0
+ log.debug(" \nBUCKET_STATS: \nbucket: {} id: {} num_objects: {} size_kb: {} num_shards: {}\n".format(bucket_name, bucket_id,
+ num_objects, size_kb, num_shards))
+ return BucketStats(bucket_name, bucket_id, num_objects, size_kb, num_shards)
+
+def get_bucket_layout(bucket_name):
+ res = exec_cmd("radosgw-admin bucket layout --bucket {}".format(bucket_name))
+ return json.loads(res)
+
+def get_bucket_shard0(bucket_name):
+ bucket_id = get_bucket_stats(bucket_name).bucket_id
+ index_gen = get_bucket_layout(bucket_name)['layout']['current_index']['gen']
+ return '.dir.%s.%d.0' % (bucket_id, index_gen)
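+
+# Index shard RADOS objects are named ".dir.<bucket_id>.<index_gen>.<shard>";
+# e.g. shard 0 of index generation 1 of bucket id "abc" is ".dir.abc.1.0".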
+
+def get_bucket_num_shards(bucket_name, bucket_id):
+ """
+ function to get bucket num shards
+ """
+ metadata = 'bucket.instance:' + bucket_name + ':' + bucket_id
+ cmd = exec_cmd('radosgw-admin metadata get {}'.format(metadata))
+ json_op = json.loads(cmd)
+ num_shards = json_op['data']['bucket_info']['num_shards']
+ return num_shards
+
+def run_bucket_reshard_cmd(bucket_name, num_shards, **kwargs):
+ cmd = 'radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(bucket_name, num_shards)
+ cmd += ' --rgw-reshard-bucket-lock-duration 30' # reduce to minimum
+ if 'error_at' in kwargs:
+ cmd += ' --inject-error-at {}'.format(kwargs.pop('error_at'))
+ elif 'abort_at' in kwargs:
+ cmd += ' --inject-abort-at {}'.format(kwargs.pop('abort_at'))
+ if 'error_code' in kwargs:
+ cmd += ' --inject-error-code {}'.format(kwargs.pop('error_code'))
+ return exec_cmd(cmd, **kwargs)
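+
+# Usage sketch: inject an EIO at the do_reshard stage without letting a
+# nonzero exit status fail the harness:
+#   out, ret = run_bucket_reshard_cmd('a-bucket', 3, error_at='do_reshard',
+#                                     check_retcode=False)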
+
+def test_bucket_reshard(conn, name, **fault):
+ # create a bucket with non-default ACLs to verify that reshard preserves them
+ bucket = conn.create_bucket(Bucket=name, ACL='authenticated-read')
+ grants = bucket.Acl().grants
+
+ objs = []
+ try:
+ # create objs
+ for i in range(0, 20):
+ objs += [bucket.put_object(Key='key' + str(i), Body=b"some_data")]
+
+ old_shard_count = get_bucket_stats(name).num_shards
+ num_shards_expected = old_shard_count + 1
+
+ # try reshard with fault injection
+ _, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False, **fault)
+
+ if fault.get('error_code') == errno.ECANCELED:
+ assert(ret == 0) # expect ECANCELED to retry and succeed
+ else:
+ assert(ret != 0 and ret != errno.EBUSY)
+
+ # check shard count
+ cur_shard_count = get_bucket_stats(name).num_shards
+ assert(cur_shard_count == old_shard_count)
+
+ # verify that the bucket is writeable by deleting an object
+ objs.pop().delete()
+
+ assert grants == bucket.Acl().grants # recheck grants after cancel
+
+ # retry reshard without fault injection. if radosgw-admin aborted,
+ # we'll have to retry until the reshard lock expires
+ while True:
+ _, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False)
+ if ret == errno.EBUSY:
+ log.info('waiting 30 seconds for reshard lock to expire...')
+ time.sleep(30)
+ continue
+ assert(ret == 0)
+ break
+
+ # recheck shard count
+ final_shard_count = get_bucket_stats(name).num_shards
+ assert(final_shard_count == num_shards_expected)
+
+ assert grants == bucket.Acl().grants # recheck grants after commit
+ finally:
+ # cleanup on resharded bucket must succeed
+ bucket.delete_objects(Delete={'Objects':[{'Key':o.key} for o in objs]})
+ bucket.delete()
+
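+# The **fault kwargs above map onto radosgw-admin's injection flags (see
+# run_bucket_reshard_cmd): error_at='X' -> --inject-error-at X,
+# abort_at='X' -> --inject-abort-at X, and error_code=errno.<E> ->
+# --inject-error-code <n>.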
+
+def main():
+ """
+ execute manual and dynamic resharding commands
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY)
+
+ # create a bucket
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+ ver_bucket = connection.create_bucket(Bucket=VER_BUCKET_NAME)
+ connection.BucketVersioning(VER_BUCKET_NAME).enable()
+
+ bucket_acl = connection.BucketAcl(BUCKET_NAME).load()
+ ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+
+ # TESTCASE 'reshard-add','reshard','add','add bucket to resharding queue','succeeds'
+ log.debug('TEST: reshard add\n')
+
+ num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ log.debug('bucket name {}'.format(json_op[0]['bucket_name']))
+ assert json_op[0]['bucket_name'] == BUCKET_NAME
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+ # TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
+ log.debug('TEST: reshard process\n')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ time.sleep(5)
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME)
+ if bucket_stats1.num_shards != num_shards_expected:
+        log.error("Resharding failed on bucket {}. The expected number of shards was not created\n".format(BUCKET_NAME))
+
+ # TESTCASE 'reshard-add','reshard','add','add non empty bucket to resharding queue','succeeds'
+ log.debug('TEST: reshard add non empty bucket\n')
+ # create objs
+ num_objs = 8
+ for i in range(0, num_objs):
+ connection.Object(BUCKET_NAME, ('key'+str(i))).put(Body=b"some_data")
+
+ num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
+ cmd = exec_cmd('radosgw-admin reshard list')
+ json_op = json.loads(cmd)
+ assert json_op[0]['bucket_name'] == BUCKET_NAME
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
+
+    # TESTCASE 'reshard-process','reshard','process','reshard non empty bucket','succeeds'
+ log.debug('TEST: reshard process non empty bucket\n')
+ cmd = exec_cmd('radosgw-admin reshard process')
+ # check bucket shards num
+ bucket_stats1 = get_bucket_stats(BUCKET_NAME)
+ if bucket_stats1.num_shards != num_shards_expected:
+        log.error("Resharding failed on bucket {}. The expected number of shards was not created\n".format(BUCKET_NAME))
+
+ # TESTCASE 'manual bucket resharding','inject error','fail','check bucket accessibility', 'retry reshard'
+ log.debug('TEST: reshard bucket with EIO injected at set_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout')
+ log.debug('TEST: reshard bucket with ECANCELED injected at set_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout', error_code=errno.ECANCELED)
+ log.debug('TEST: reshard bucket with abort at set_target_layout\n')
+ test_bucket_reshard(connection, 'abort-at-set-target-layout', abort_at='set_target_layout')
+
+ log.debug('TEST: reshard bucket with EIO injected at block_writes\n')
+ test_bucket_reshard(connection, 'error-at-block-writes', error_at='block_writes')
+ log.debug('TEST: reshard bucket with abort at block_writes\n')
+ test_bucket_reshard(connection, 'abort-at-block-writes', abort_at='block_writes')
+
+ log.debug('TEST: reshard bucket with EIO injected at commit_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout')
+ log.debug('TEST: reshard bucket with ECANCELED injected at commit_target_layout\n')
+ test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout', error_code=errno.ECANCELED)
+ log.debug('TEST: reshard bucket with abort at commit_target_layout\n')
+ test_bucket_reshard(connection, 'abort-at-commit-target-layout', abort_at='commit_target_layout')
+
+ log.debug('TEST: reshard bucket with EIO injected at do_reshard\n')
+ test_bucket_reshard(connection, 'error-at-do-reshard', error_at='do_reshard')
+ log.debug('TEST: reshard bucket with abort at do_reshard\n')
+ test_bucket_reshard(connection, 'abort-at-do-reshard', abort_at='do_reshard')
+
+    # TESTCASE 'versioning-reshard','bucket','reshard','versioning reshard','succeeds'
+ log.debug(' test: reshard versioned bucket')
+ num_shards_expected = get_bucket_stats(VER_BUCKET_NAME).num_shards + 1
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(VER_BUCKET_NAME,
+ num_shards_expected))
+ # check bucket shards num
+ ver_bucket_stats = get_bucket_stats(VER_BUCKET_NAME)
+ assert ver_bucket_stats.num_shards == num_shards_expected
+
+ # TESTCASE 'check acl'
+ new_bucket_acl = connection.BucketAcl(BUCKET_NAME).load()
+ assert new_bucket_acl == bucket_acl
+ new_ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
+ assert new_ver_bucket_acl == ver_bucket_acl
+
+ # TESTCASE 'check reshard removes olh entries with empty name'
+ log.debug(' test: reshard removes olh entries with empty name')
+ bucket.objects.all().delete()
+
+
+ # get name of shard 0 object, add a bogus olh entry with empty name
+ bucket_shard0 = get_bucket_shard0(BUCKET_NAME)
+ if 'CEPH_ROOT' in os.environ:
+ k = '%s/qa/workunits/rgw/olh_noname_key' % os.environ['CEPH_ROOT']
+ v = '%s/qa/workunits/rgw/olh_noname_val' % os.environ['CEPH_ROOT']
+ else:
+ k = 'olh_noname_key'
+ v = 'olh_noname_val'
+ exec_cmd('rados -p %s setomapval %s --omap-key-file %s < %s' % (INDEX_POOL, bucket_shard0, k, v))
+
+ # check that bi list has one entry with empty name
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
+ json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore utf-8 can't decode 0x80
+ assert len(json_op) == 1
+ assert json_op[0]['entry']['key']['name'] == ''
+
+ # reshard to prune the bogus olh
+ cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s --yes-i-really-mean-it' % (BUCKET_NAME, 1))
+
+    # verify that the bi list now has zero entries
+ cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
+ json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore utf-8 can't decode 0x80
+ assert len(json_op) == 0
+
+ # TESTCASE 'check that PUT succeeds during reshard'
+ log.debug(' test: PUT succeeds during reshard')
+ num_shards = get_bucket_stats(VER_BUCKET_NAME).num_shards
+ exec_cmd('''radosgw-admin --inject-delay-at=do_reshard --inject-delay-ms=5000 \
+ bucket reshard --bucket {} --num-shards {}'''
+ .format(VER_BUCKET_NAME, num_shards + 1), wait = False)
+ time.sleep(1)
+ ver_bucket.put_object(Key='put_during_reshard', Body=b"some_data")
+ log.debug('put object successful')
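+    # (the injected 5s delay at do_reshard holds the reshard in flight while
+    # the PUT above runs; wait=False presumably lets exec_cmd return without
+    # waiting, so the reshard proceeds in the background)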
+
+ # TESTCASE 'check that bucket stats are correct after reshard with unlinked entries'
+ log.debug('TEST: check that bucket stats are correct after reshard with unlinked entries\n')
+ ver_bucket.object_versions.all().delete()
+ ok_keys = ['a', 'b', 'c']
+ unlinked_keys = ['x', 'y', 'z']
+ put_objects(ver_bucket, ok_keys)
+ create_unlinked_objects(connection, ver_bucket, unlinked_keys)
+ cmd = exec_cmd(f'radosgw-admin bucket reshard --bucket {VER_BUCKET_NAME} --num-shards 17 --yes-i-really-mean-it')
+ out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {VER_BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys')
+ json_out = json.loads(out)
+ assert len(json_out) == len(unlinked_keys)
+ ver_bucket.object_versions.all().delete()
+ out = exec_cmd(f'radosgw-admin bucket stats --bucket {VER_BUCKET_NAME}')
+ json_out = json.loads(out)
+ log.debug(json_out['usage'])
+ assert json_out['usage']['rgw.main']['size'] == 0
+ assert json_out['usage']['rgw.main']['num_objects'] == 0
+ assert json_out['usage']['rgw.main']['size_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_actual'] == 0
+ assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.objects.all().delete()
+ bucket.delete()
+ log.debug("Deleting bucket {}".format(VER_BUCKET_NAME))
+ ver_bucket.object_versions.all().delete()
+ ver_bucket.delete()
+
+main()
+log.info("Completed resharding tests")
diff --git a/qa/workunits/rgw/test_rgw_s3_mp_reupload.py b/qa/workunits/rgw/test_rgw_s3_mp_reupload.py
new file mode 100755
index 000000000..b3cb2d5ab
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_s3_mp_reupload.py
@@ -0,0 +1,121 @@
+import boto3
+import botocore.exceptions
+import sys
+import os
+import subprocess
+
+#boto3.set_stream_logger(name='botocore')
+
+# handles three optional command-line arguments:
+#   <bucket-name> : default is "bkt314738362229"
+#   <0 or 1> : 0 -> upload aborted, 1 -> completed; default is completed
+#   <0 or 1> : 0 -> unversioned bucket, 1 -> versioned; default is unversioned
+
+if len(sys.argv) >= 2:
+ bucket_name = sys.argv[1]
+else:
+ bucket_name = "bkt314738362229"
+print("bucket nams is %s" % bucket_name)
+
+complete_mpu = True
+if len(sys.argv) >= 3:
+ complete_mpu = int(sys.argv[2]) > 0
+
+versioned_bucket = False
+if len(sys.argv) >= 4:
+ versioned_bucket = int(sys.argv[3]) > 0
+
+rgw_host = os.environ['RGW_HOST']
+access_key = os.environ['RGW_ACCESS_KEY']
+secret_key = os.environ['RGW_SECRET_KEY']
+
+try:
+ endpoint='http://%s:%d' % (rgw_host, 80)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+except botocore.exceptions.EndpointConnectionError:
+ try:
+ endpoint='https://%s:%d' % (rgw_host, 443)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ verify=False,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+ except botocore.exceptions.EndpointConnectionError:
+ endpoint='http://%s:%d' % (rgw_host, 8000)
+ client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+ res = client.create_bucket(Bucket=bucket_name)
+
+print("endpoint is %s" % endpoint)
+
+if versioned_bucket:
+ res = client.put_bucket_versioning(
+ Bucket=bucket_name,
+ VersioningConfiguration={
+ 'MFADelete': 'Disabled',
+ 'Status': 'Enabled'}
+ )
+
+key = "mpu_test4"
+nparts = 2
+ndups = 11
+do_reupload = True
+
+part_path = "/tmp/mp_part_5m"
+subprocess.run(["dd", "if=/dev/urandom", "of=" + part_path, "bs=1M", "count=5"], check=True)
+
+f = open(part_path, 'rb')
+
+res = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+mpu_id = res["UploadId"]
+
+print("start UploadId=%s" % (mpu_id))
+
+parts = []
+parts2 = []
+
+for ix in range(0,nparts):
+ part_num = ix + 1
+ f.seek(0)
+ res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
+ UploadId=mpu_id, PartNumber=part_num)
+ # save
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ print("phase 1 uploaded part %s" % part)
+ parts.append(part)
+
+if do_reupload:
+ # just re-upload part 1
+ part_num = 1
+ for ix in range(0,ndups):
+ f.seek(0)
+ res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
+ UploadId=mpu_id, PartNumber=part_num)
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ print ("phase 2 uploaded part %s" % part)
+
+ # save
+ etag = res['ETag']
+ part = {'ETag': etag, 'PartNumber': part_num}
+ parts2.append(part)
+
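+# (re-uploading identical bytes yields an identical ETag, since a plain part's
+# ETag is the MD5 of its body, so completing with the phase 1 parts list below
+# remains valid)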
+if complete_mpu:
+ print("completing multipart upload, parts=%s" % parts)
+ res = client.complete_multipart_upload(
+ Bucket=bucket_name, Key=key, UploadId=mpu_id,
+ MultipartUpload={'Parts': parts})
+else:
+ print("aborting multipart upload, parts=%s" % parts)
+ res = client.abort_multipart_upload(
+ Bucket=bucket_name, Key=key, UploadId=mpu_id)
+
+# clean up
+subprocess.run(["rm", "-f", part_path], check=True)
diff --git a/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh b/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh
new file mode 100755
index 000000000..5d73fd048
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh
@@ -0,0 +1,110 @@
+#!/usr/bin/env bash
+
+# INITIALIZATION
+
+mydir=$(dirname $0)
+data_pool=default.rgw.buckets.data
+orphan_list_out=/tmp/orphan_list.$$
+radoslist_out=/tmp/radoslist.$$
+rados_ls_out=/tmp/rados_ls.$$
+diff_out=/tmp/diff.$$
+
+rgw_host="$(hostname --fqdn)"
+echo "INFO: fully qualified domain name: $rgw_host"
+
+export RGW_ACCESS_KEY="0555b35654ad1656d804"
+export RGW_SECRET_KEY="h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+export RGW_HOST="${RGW_HOST:-$rgw_host}"
+
+# random argument determines if multipart is aborted or completed 50/50
+outcome=$((RANDOM % 2))
+if [ $outcome -eq 0 ] ;then
+ echo "== TESTING *ABORTING* MULTIPART UPLOAD WITH RE-UPLOADS =="
+else
+ echo "== TESTING *COMPLETING* MULTIPART UPLOAD WITH RE-UPLOADS =="
+fi
+
+# random argument determines if bucket is versioned 50/50
+versioning=$((RANDOM % 2))
+if [ $versioning -eq 0 ] ;then
+ echo "== TESTING NON-VERSIONED BUCKET =="
+else
+ echo "== TESTING VERSIONED BUCKET =="
+fi
+
+# create a randomized bucket name
+bucket="reupload-bkt-$((RANDOM % 899999 + 100000))"
+
+
+# SET UP PYTHON VIRTUAL ENVIRONMENT
+
+# install boto3
+python3 -m venv $mydir
+source $mydir/bin/activate
+pip install pip --upgrade
+pip install boto3
+
+
+# CREATE RGW USER IF NECESSARY
+
+if radosgw-admin user info --access-key $RGW_ACCESS_KEY 2>/dev/null ;then
+ echo INFO: user already exists
+else
+ echo INFO: creating user
+ radosgw-admin user create --uid testid \
+ --access-key $RGW_ACCESS_KEY \
+ --secret $RGW_SECRET_KEY \
+ --display-name 'M. Tester' \
+ --email tester@ceph.com 2>/dev/null
+fi
+
+
+# RUN REUPLOAD TEST
+
+$mydir/bin/python3 ${mydir}/test_rgw_s3_mp_reupload.py $bucket $outcome $versioning
+
+
+# ANALYZE FOR ERRORS
+# (NOTE: for now we're choosing not to use the rgw-orphan-list tool)
+
+# force garbage collection to remove extra parts
+radosgw-admin gc process --include-all 2>/dev/null
+
+marker=$(radosgw-admin metadata get bucket:$bucket 2>/dev/null | grep bucket_id | sed 's/.*: "\(.*\)".*/\1/')
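+# (the metadata line looks like  "bucket_id": "<id>",  so the sed captures the
+# quoted value; data-pool objects belonging to the bucket are expected to be
+# named with that id as a prefix, which the grep below relies on)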
+
+# determine expected rados objects
+radosgw-admin bucket radoslist --bucket=$bucket 2>/dev/null | sort >$radoslist_out
+echo "radosgw-admin bucket radoslist:"
+cat $radoslist_out
+
+# determine found rados objects
+rados ls -p $data_pool 2>/dev/null | grep "^$marker" | sort >$rados_ls_out
+echo "rados ls:"
+cat $rados_ls_out
+
+# compare expected and found
+diff $radoslist_out $rados_ls_out >$diff_out
+if [ $(wc -l < $diff_out) -ne 0 ] ;then
+ error=1
+ echo "ERROR: Found differences between expected and actual rados objects for test bucket."
+ echo " note: indicators: '>' found but not expected; '<' expected but not found."
+ cat $diff_out
+fi
+
+
+# CLEAN UP
+
+deactivate
+
+rm -f $orphan_list_out $radoslist_out $rados_ls_out $diff_out
+
+
+# PRODUCE FINAL RESULTS
+
+if [ -n "$error" ] ;then
+ echo "== FAILED =="
+ exit 1
+fi
+
+echo "== PASSED =="
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_throttle.sh b/qa/workunits/rgw/test_rgw_throttle.sh
new file mode 100755
index 000000000..f637b8f08
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_throttle.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -e
+
+ceph_test_rgw_throttle
+
+exit 0
diff --git a/qa/workunits/rgw/test_rgw_versioning.py b/qa/workunits/rgw/test_rgw_versioning.py
new file mode 100755
index 000000000..fc69e138d
--- /dev/null
+++ b/qa/workunits/rgw/test_rgw_versioning.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+
+import logging as log
+import json
+import uuid
+import botocore
+import time
+from common import exec_cmd, create_user, boto_connect
+from botocore.config import Config
+
+"""
+Tests behavior of bucket versioning.
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+#
+
+""" Constants """
+USER = 'versioning-tester'
+DISPLAY_NAME = 'Versioning Testing'
+ACCESS_KEY = 'LTA662PVVDTDWX6M2AB0'
+SECRET_KEY = 'pvtchqajgzqx5581t6qbddbkj0bgf3a69qdkjcea'
+BUCKET_NAME = 'versioning-bucket'
+DATA_POOL = 'default.rgw.buckets.data'
+
+def main():
+ """
+ execute versioning tests
+ """
+ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY)
+
+ connection = boto_connect(ACCESS_KEY, SECRET_KEY, Config(retries = {
+ 'total_max_attempts': 1,
+ }))
+
+ # pre-test cleanup
+ try:
+ bucket = connection.Bucket(BUCKET_NAME)
+ bucket.objects.all().delete()
+ bucket.object_versions.all().delete()
+ bucket.delete()
+ except botocore.exceptions.ClientError as e:
+ if not e.response['Error']['Code'] == 'NoSuchBucket':
+ raise
+
+ bucket = connection.create_bucket(Bucket=BUCKET_NAME)
+ connection.BucketVersioning(BUCKET_NAME).enable()
+
+ # reproducer for bug from https://tracker.ceph.com/issues/59663
+ # TESTCASE 'verify that index entries and OLH objects are cleaned up after redundant deletes'
+ log.debug('TEST: verify that index entries and OLH objects are cleaned up after redundant deletes\n')
+ key = str(uuid.uuid4())
+ resp = bucket.Object(key).delete()
+ assert 'DeleteMarker' in resp, 'DeleteMarker key not present in response'
+ assert resp['DeleteMarker'], 'DeleteMarker value not True in response'
+ assert 'VersionId' in resp, 'VersionId key not present in response'
+ version_id = resp['VersionId']
+ bucket.Object(key).delete()
+ connection.ObjectVersion(bucket.name, key, version_id).delete()
+ # bucket index should now be empty
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME}')
+ json_out = json.loads(out.replace(b'\x80', b'0x80'))
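+    # (bi entries for OLH objects can contain a raw 0x80 byte that isn't
+    # valid JSON, hence the substitution before parsing)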
+ assert len(json_out) == 0, 'bucket index was not empty after all objects were deleted'
+
+ (_out, ret) = exec_cmd(f'rados -p {DATA_POOL} ls | grep {key}', check_retcode=False)
+ assert ret != 0, 'olh object was not cleaned up'
+
+ # TESTCASE 'verify that index entries and OLH objects are cleaned up after index linking error'
+ log.debug('TEST: verify that index entries and OLH objects are cleaned up after index linking error\n')
+ key = str(uuid.uuid4())
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ time.sleep(1)
+ bucket.Object(key).delete()
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME}')
+ json_out = json.loads(out.replace(b'\x80', b'0x80'))
+ assert len(json_out) == 0, 'bucket index was not empty after op failed'
+ (_out, ret) = exec_cmd(f'rados -p {DATA_POOL} ls | grep {key}', check_retcode=False)
+ assert ret != 0, 'olh object was not cleaned up'
+
+ # TESTCASE 'verify that original null object version is intact after failed olh upgrade'
+ log.debug('TEST: verify that original null object version is intact after failed olh upgrade\n')
+ connection.BucketVersioning(BUCKET_NAME).suspend()
+ key = str(uuid.uuid4())
+ put_resp = bucket.put_object(Key=key, Body=b"data")
+ connection.BucketVersioning(BUCKET_NAME).enable()
+ try:
+ exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2')
+ time.sleep(1)
+ # expected to fail due to the above error injection
+ bucket.put_object(Key=key, Body=b"new data")
+ except Exception as e:
+ log.debug(e)
+ finally:
+ exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err')
+ get_resp = bucket.Object(key).get()
+ assert put_resp.e_tag == get_resp['ETag'], 'get did not return null version with correct etag'
+
+ # Clean up
+ log.debug("Deleting bucket {}".format(BUCKET_NAME))
+ bucket.object_versions.all().delete()
+ bucket.delete()
+
+main()
+log.info("Completed bucket versioning tests")
diff --git a/qa/workunits/suites/blogbench.sh b/qa/workunits/suites/blogbench.sh
new file mode 100755
index 000000000..a05d8d21c
--- /dev/null
+++ b/qa/workunits/suites/blogbench.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+echo "getting blogbench"
+wget http://download.ceph.com/qa/blogbench-1.0.tar.bz2
+#cp /home/gregf/src/blogbench-1.0.tar.bz2 .
+tar -xvf blogbench-1.0.tar.bz2
+cd blogbench-1.0/
+echo "making blogbench"
+./configure
+make
+cd src
+mkdir blogtest_in
+echo "running blogbench"
+./blogbench -d blogtest_in
diff --git a/qa/workunits/suites/bonnie.sh b/qa/workunits/suites/bonnie.sh
new file mode 100755
index 000000000..b60cc6a5e
--- /dev/null
+++ b/qa/workunits/suites/bonnie.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -e
+
+# locate bonnie++, falling back to /usr/sbin; the || keeps `set -e` from
+# aborting when `which` finds nothing
+bonnie_bin=$(which bonnie++) || bonnie_bin=/usr/sbin/bonnie++
+
+uid_flags=""
+[ "`id -u`" == "0" ] && uid_flags="-u root"
+
+$bonnie_bin $uid_flags -n 100
diff --git a/qa/workunits/suites/cephfs_journal_tool_smoke.sh b/qa/workunits/suites/cephfs_journal_tool_smoke.sh
new file mode 100755
index 000000000..3fe01ed63
--- /dev/null
+++ b/qa/workunits/suites/cephfs_journal_tool_smoke.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
+export JOURNAL_FILE=/tmp/journal.bin
+export JSON_OUTPUT=/tmp/json.tmp
+export BINARY_OUTPUT=/tmp/binary.tmp
+
+if [ -d $BINARY_OUTPUT ] ; then
+ rm -rf $BINARY_OUTPUT
+fi
+
+# Check that the import/export stuff really works as expected
+# first because it's used as the reset method between
+# following checks.
+echo "Testing that export/import cycle preserves state"
+HEADER_STATE=`$BIN header get`
+EVENT_LIST=`$BIN event get list`
+$BIN journal export $JOURNAL_FILE
+$BIN journal import $JOURNAL_FILE
+NEW_HEADER_STATE=`$BIN header get`
+NEW_EVENT_LIST=`$BIN event get list`
+
+if [ ! "$HEADER_STATE" = "$NEW_HEADER_STATE" ] ; then
+ echo "Import failed to preserve header state"
+ echo $HEADER_STATE
+ echo $NEW_HEADER_STATE
+ exit -1
+fi
+
+if [ ! "$EVENT_LIST" = "$NEW_EVENT_LIST" ] ; then
+ echo "Import failed to preserve event state"
+ echo $EVENT_LIST
+ echo $NEW_EVENT_LIST
+ exit -1
+fi
+
+echo "Testing 'journal' commands..."
+
+# Simplest thing: print the vital statistics of the journal
+$BIN journal inspect
+$BIN header get
+
+# Make a copy of the journal in its original state
+$BIN journal export $JOURNAL_FILE
+if [ ! -s $JOURNAL_FILE ] ; then
+ echo "Export to $JOURNAL_FILE failed"
+ exit -1
+fi
+
+# Can we execute a journal reset?
+$BIN journal reset
+$BIN journal inspect
+$BIN header get
+
+echo "Rolling back journal to original state..."
+$BIN journal import $JOURNAL_FILE
+
+echo "Testing 'header' commands..."
+$BIN header get
+$BIN header set write_pos 123
+$BIN header set expire_pos 123
+$BIN header set trimmed_pos 123
+
+echo "Rolling back journal to original state..."
+$BIN journal import $JOURNAL_FILE
+
+echo "Testing 'event' commands..."
+$BIN event get summary
+$BIN event get --type=UPDATE --path=/ --inode=0 --frag=0x100 summary
+$BIN event get json --path $JSON_OUTPUT
+if [ ! -s $JSON_OUTPUT ] ; then
+ echo "Export to $JSON_OUTPUT failed"
+ exit -1
+fi
+$BIN event get binary --path $BINARY_OUTPUT
+if [ ! -s $BINARY_OUTPUT ] ; then
+ echo "Export to $BINARY_OUTPUT failed"
+ exit -1
+fi
+$BIN event recover_dentries summary
+$BIN event splice summary
+
+# Tests finish.
+# Metadata objects have been modified by the 'event recover_dentries' command.
+# Journal is no longer consistent with respect to metadata objects (especially inotable).
+# To ensure mds successfully replays its journal, we need to do journal reset.
+$BIN journal reset
+cephfs-table-tool all reset session
+
diff --git a/qa/workunits/suites/dbench-short.sh b/qa/workunits/suites/dbench-short.sh
new file mode 100755
index 000000000..b0da02275
--- /dev/null
+++ b/qa/workunits/suites/dbench-short.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -e
+
+dbench 1
diff --git a/qa/workunits/suites/dbench.sh b/qa/workunits/suites/dbench.sh
new file mode 100755
index 000000000..32c893b45
--- /dev/null
+++ b/qa/workunits/suites/dbench.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+
+dbench 1
+dbench 10
diff --git a/qa/workunits/suites/ffsb.sh b/qa/workunits/suites/ffsb.sh
new file mode 100755
index 000000000..bf95a05c4
--- /dev/null
+++ b/qa/workunits/suites/ffsb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -e
+
+mydir=`dirname $0`
+
+# retry the clone if the first attempt times out; the retry doubles the timeout
+trap -- 'retry' EXIT
+retry() {
+ rm -rf ffsb
+ # double the timeout value
+ timeout 3600 git clone https://git.ceph.com/ffsb.git --depth 1
+}
+rm -rf ffsb
+timeout 1800 git clone https://git.ceph.com/ffsb.git --depth 1
+trap - EXIT
+
+cd ffsb
+./configure
+make
+cd ..
+mkdir tmp
+cd tmp
+
+for f in $mydir/*.ffsb
+do
+ ../ffsb/ffsb $f
+done
+cd ..
+rm -r tmp ffsb*
+
diff --git a/qa/workunits/suites/fio.sh b/qa/workunits/suites/fio.sh
new file mode 100755
index 000000000..ee69de81c
--- /dev/null
+++ b/qa/workunits/suites/fio.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -x
+
+gen_fio_file() {
+ iter=$1
+ f=$2
+ cat > randio-$$-${iter}.fio <<EOF
+[randio]
+blocksize_range=32m:128m
+blocksize_unaligned=1
+filesize=10G:20G
+readwrite=randrw
+runtime=300
+size=20G
+filename=${f}
+EOF
+}
+
+sudo apt-get -y install fio
+for i in $(seq 1 20); do
+ fcount=$(ls donetestfile* 2>/dev/null | wc -l)
+ donef="foo"
+ fiof="bar"
+ if test ${fcount} -gt 0; then
+ # choose random file
+        r=$(( RANDOM % fcount ))
+ testfiles=( $(ls donetestfile*) )
+ donef=${testfiles[${r}]}
+ fiof=$(echo ${donef} | sed -e "s|done|fio|")
+ gen_fio_file $i ${fiof}
+ else
+ fiof=fiotestfile.$$.$i
+ donef=donetestfile.$$.$i
+ gen_fio_file $i ${fiof}
+ fi
+
+ sudo rm -f ${donef}
+ sudo fio randio-$$-$i.fio
+ sudo ln ${fiof} ${donef}
+ ls -la
+done
diff --git a/qa/workunits/suites/fsstress.sh b/qa/workunits/suites/fsstress.sh
new file mode 100755
index 000000000..e5da5b439
--- /dev/null
+++ b/qa/workunits/suites/fsstress.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -ex
+
+mkdir -p fsstress
+pushd fsstress
+wget -q -O ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz
+tar xzf ltp-full.tgz
+pushd ltp-full-20091231/testcases/kernel/fs/fsstress
+make
+BIN=$(readlink -f fsstress)
+popd
+popd
+
+T=$(mktemp -d -p .)
+"$BIN" -d "$T" -l 1 -n 1000 -p 10 -v
+rm -rf -- "$T"
diff --git a/qa/workunits/suites/fsx.sh b/qa/workunits/suites/fsx.sh
new file mode 100755
index 000000000..0d5ba3a58
--- /dev/null
+++ b/qa/workunits/suites/fsx.sh
@@ -0,0 +1,16 @@
+#!/bin/sh -x
+
+set -e
+
+git clone https://git.ceph.com/xfstests-dev.git
+cd xfstests-dev
+git checkout 12973fc04fd10d4af086901e10ffa8e48866b735
+make -j4
+cd ..
+cp xfstests-dev/ltp/fsx .
+
+OPTIONS="-z" # don't use zero range calls; not supported by cephfs
+
+./fsx $OPTIONS 1MB -N 50000 -p 10000 -l 1048576
+./fsx $OPTIONS 10MB -N 50000 -p 10000 -l 10485760
+./fsx $OPTIONS 100MB -N 50000 -p 10000 -l 104857600
diff --git a/qa/workunits/suites/fsync-tester.sh b/qa/workunits/suites/fsync-tester.sh
new file mode 100755
index 000000000..6e32786ea
--- /dev/null
+++ b/qa/workunits/suites/fsync-tester.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+set -e
+
+# To skirt around GPL compatibility issues:
+wget http://download.ceph.com/qa/fsync-tester.c
+gcc -D_GNU_SOURCE fsync-tester.c -o fsync-tester
+
+./fsync-tester
+
+echo $PATH
+whereis lsof
+lsof
diff --git a/qa/workunits/suites/iogen.sh b/qa/workunits/suites/iogen.sh
new file mode 100755
index 000000000..d92b87083
--- /dev/null
+++ b/qa/workunits/suites/iogen.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -e
+
+echo "getting iogen"
+wget http://download.ceph.com/qa/iogen_3.1p0.tar
+tar -xvzf iogen_3.1p0.tar
+cd iogen_3.1p0
+echo "making iogen"
+make
+echo "running iogen"
+./iogen -n 5 -s 2g
+echo "sleep for 10 min"
+sleep 600
+echo "stopping iogen"
+./iogen -k
+
+echo "OK"
diff --git a/qa/workunits/suites/iozone-sync.sh b/qa/workunits/suites/iozone-sync.sh
new file mode 100755
index 000000000..a37962d30
--- /dev/null
+++ b/qa/workunits/suites/iozone-sync.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -e
+
+# basic tests of O_SYNC, O_DSYNC, O_RSYNC
+# test O_SYNC
+iozone -c -e -s 512M -r 1M -t 1 -F osync1 -i 0 -i 1 -o
+# test O_DSYNC
+iozone -c -e -s 512M -r 1M -t 1 -F odsync1 -i 0 -i 1 -+D
+# test O_RSYNC
+iozone -c -e -s 512M -r 1M -t 1 -F orsync1 -i 0 -i 1 -+r
+
+# test same file with O_SYNC in one process, buffered in the other
+# the sync test starts first, so the buffered test should blow
+# past it and finish first while the sync run is still writing
+iozone -c -e -s 512M -r 1M -t 1 -F osync2 -i 0 -i 1 -o &
+sleep 1
+iozone -c -e -s 512M -r 256K -t 1 -F osync2 -i 0
+wait $!
+
+# test same file with O_SYNC from different threads
+iozone -c -e -s 512M -r 1M -t 2 -F osync3 -i 2 -o
diff --git a/qa/workunits/suites/iozone.sh b/qa/workunits/suites/iozone.sh
new file mode 100755
index 000000000..7dc50cb0b
--- /dev/null
+++ b/qa/workunits/suites/iozone.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -ex
+
+iozone -c -e -s 1024M -r 16K -t 1 -F f1 -i 0 -i 1
+iozone -c -e -s 1024M -r 1M -t 1 -F f2 -i 0 -i 1
+iozone -c -e -s 10240M -r 1M -t 1 -F f3 -i 0 -i 1
diff --git a/qa/workunits/suites/pjd.sh b/qa/workunits/suites/pjd.sh
new file mode 100755
index 000000000..bd72f77f2
--- /dev/null
+++ b/qa/workunits/suites/pjd.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -e
+
+wget http://download.ceph.com/qa/pjd-fstest-20090130-RC-aclfixes.tgz
+tar zxvf pjd*.tgz
+cd pjd-fstest-20090130-RC
+make clean
+make
+cd ..
+mkdir tmp
+cd tmp
+# must be root!
+sudo prove -r -v --exec 'bash -x' ../pjd*/tests
+cd ..
+rm -rf tmp pjd*
+
diff --git a/qa/workunits/suites/random_write.32.ffsb b/qa/workunits/suites/random_write.32.ffsb
new file mode 100644
index 000000000..ba83e470f
--- /dev/null
+++ b/qa/workunits/suites/random_write.32.ffsb
@@ -0,0 +1,48 @@
+# Large file random writes.
+# 128 files, 100MB per file.
+
+time=300 # 5 min
+alignio=1
+
+[filesystem0]
+ location=.
+ num_files=128
+ min_filesize=104857600 # 100 MB
+ max_filesize=104857600
+ reuse=1
+[end0]
+
+[threadgroup0]
+ num_threads=32
+
+ write_random=1
+ write_weight=1
+
+ write_size=5242880 # 5 MB
+ write_blocksize=4096
+
+ [stats]
+ enable_stats=1
+ enable_range=1
+
+ msec_range 0.00 0.01
+ msec_range 0.01 0.02
+ msec_range 0.02 0.05
+ msec_range 0.05 0.10
+ msec_range 0.10 0.20
+ msec_range 0.20 0.50
+ msec_range 0.50 1.00
+ msec_range 1.00 2.00
+ msec_range 2.00 5.00
+ msec_range 5.00 10.00
+ msec_range 10.00 20.00
+ msec_range 20.00 50.00
+ msec_range 50.00 100.00
+ msec_range 100.00 200.00
+ msec_range 200.00 500.00
+ msec_range 500.00 1000.00
+ msec_range 1000.00 2000.00
+ msec_range 2000.00 5000.00
+ msec_range 5000.00 10000.00
+ [end]
+[end0]
diff --git a/qa/workunits/test_telemetry_pacific.sh b/qa/workunits/test_telemetry_pacific.sh
new file mode 100755
index 000000000..a971f5883
--- /dev/null
+++ b/qa/workunits/test_telemetry_pacific.sh
@@ -0,0 +1,23 @@
+#!/bin/bash -ex
+
+# Set up ident details for cluster
+ceph config set mgr mgr/telemetry/channel_ident true
+ceph config set mgr mgr/telemetry/organization 'ceph-qa'
+ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
+
+# Opt-in
+ceph telemetry on --license sharing-1-0
+
+# Check last_opt_revision
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 3 ]; then
+ echo "last_opt_revision is incorrect."
+ exit 1
+fi
+
+# Check reports
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+echo OK
diff --git a/qa/workunits/test_telemetry_pacific_x.sh b/qa/workunits/test_telemetry_pacific_x.sh
new file mode 100755
index 000000000..0e4a832db
--- /dev/null
+++ b/qa/workunits/test_telemetry_pacific_x.sh
@@ -0,0 +1,59 @@
+#!/bin/bash -ex
+
+# Assert that we're still opted in
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 3 ]; then
+ echo "last_opt_revision is incorrect"
+ exit 1
+fi
+
+# Check the warning:
+STATUS=$(ceph -s)
+if ! [[ $STATUS == *"Telemetry requires re-opt-in"* ]]
+then
+ echo "STATUS does not contain re-opt-in warning"
+ exit 1
+fi
+
+# Check new collections
+COLLECTIONS=$(ceph telemetry collection ls)
+NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
+for col in ${NEW_COLLECTIONS[@]}; do
+ if ! [[ $COLLECTIONS == *$col* ]];
+ then
+ echo "COLLECTIONS does not contain" "'"$col"'."
+ exit 1
+ fi
+done
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Opt in to new collections
+ceph telemetry on --license sharing-1-0
+ceph telemetry enable channel perf
+
+# Wait for the re-opt-in warning to clear:
+timeout=60
+STATUS=$(ceph -s)
+until [[ $STATUS != *"Telemetry requires re-opt-in"* ]] || [ $timeout -le 0 ]; do
+ STATUS=$(ceph -s)
+ sleep 1
+ timeout=$(( timeout - 1 ))
+done
+if [ $timeout -le 0 ]; then
+ echo "STATUS should not contain re-opt-in warning at this point"
+ exit 1
+fi
+
+# Run show commands
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+# Opt out
+ceph telemetry off
+
+echo OK
diff --git a/qa/workunits/test_telemetry_quincy.sh b/qa/workunits/test_telemetry_quincy.sh
new file mode 100755
index 000000000..e8b07ec13
--- /dev/null
+++ b/qa/workunits/test_telemetry_quincy.sh
@@ -0,0 +1,44 @@
+#!/bin/bash -ex
+
+# Set up ident details for cluster
+ceph config set mgr mgr/telemetry/channel_ident true
+ceph config set mgr mgr/telemetry/organization 'ceph-qa'
+ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
+
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Assert that new collections are available
+COLLECTIONS=$(ceph telemetry collection ls)
+NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
+for col in ${NEW_COLLECTIONS[@]}; do
+ if ! [[ $COLLECTIONS == *$col* ]];
+ then
+ echo "COLLECTIONS does not contain" "'"$col"'."
+ exit 1
+ fi
+done
+
+# Opt-in
+ceph telemetry on --license sharing-1-0
+
+# Enable perf channel
+ceph telemetry enable channel perf
+
+# For quincy, the last_opt_revision remains at 1 since last_opt_revision
+# was phased out for fresh installs of quincy.
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 1 ]; then
+ echo "last_opt_revision is incorrect"
+ exit 1
+fi
+
+# Run show commands
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+echo OK
diff --git a/qa/workunits/test_telemetry_quincy_x.sh b/qa/workunits/test_telemetry_quincy_x.sh
new file mode 100755
index 000000000..4734132d0
--- /dev/null
+++ b/qa/workunits/test_telemetry_quincy_x.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -ex
+
+# For quincy, the last_opt_revision remains at 1 since last_opt_revision
+# was phased out for fresh installs of quincy.
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 1 ]; then
+ echo "last_opt_revision is incorrect"
+ exit 1
+fi
+
+# Check the warning:
+ceph -s
+
+COLLECTIONS=$(ceph telemetry collection ls)
+NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
+for col in ${NEW_COLLECTIONS[@]}; do
+ if ! [[ $COLLECTIONS == *$col* ]];
+ then
+ echo "COLLECTIONS does not contain" "'"$col"'."
+ exit 1
+ fi
+done
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Opt in to new collections
+# Currently, no new collections between latest quincy and reef (dev)
+
+# Run show commands
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+# Opt out
+ceph telemetry off
+
+echo OK
diff --git a/qa/workunits/true.sh b/qa/workunits/true.sh
new file mode 100755
index 000000000..296ef781c
--- /dev/null
+++ b/qa/workunits/true.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+true
diff --git a/qa/workunits/windows/libvirt_vm/autounattend.xml b/qa/workunits/windows/libvirt_vm/autounattend.xml
new file mode 100644
index 000000000..c3cdf3171
--- /dev/null
+++ b/qa/workunits/windows/libvirt_vm/autounattend.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="utf-8"?>
+<unattend xmlns="urn:schemas-microsoft-com:unattend">
+
+ <settings pass="windowsPE">
+
+ <component name="Microsoft-Windows-International-Core-WinPE" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <SetupUILanguage>
+ <UILanguage>en-US</UILanguage>
+ </SetupUILanguage>
+ <SystemLocale>en-US</SystemLocale>
+ <UILanguage>en-US</UILanguage>
+ <UserLocale>en-US</UserLocale>
+ </component>
+
+ <component name="Microsoft-Windows-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <DiskConfiguration>
+ <WillShowUI>OnError</WillShowUI>
+ <Disk wcm:action="add">
+ <CreatePartitions>
+ <CreatePartition wcm:action="add">
+ <Order>1</Order>
+ <Size>100</Size>
+ <Type>Primary</Type>
+ </CreatePartition>
+ <CreatePartition wcm:action="add">
+ <Order>2</Order>
+ <Extend>true</Extend>
+ <Type>Primary</Type>
+ </CreatePartition>
+ </CreatePartitions>
+ <ModifyPartitions>
+ <ModifyPartition wcm:action="add">
+ <Active>true</Active>
+ <Label>Boot</Label>
+ <Format>NTFS</Format>
+ <Order>1</Order>
+ <PartitionID>1</PartitionID>
+ </ModifyPartition>
+ <ModifyPartition wcm:action="add">
+ <Format>NTFS</Format>
+ <Order>2</Order>
+ <PartitionID>2</PartitionID>
+ <Label>System</Label>
+ </ModifyPartition>
+ </ModifyPartitions>
+ <DiskID>0</DiskID>
+ <WillWipeDisk>true</WillWipeDisk>
+ </Disk>
+ </DiskConfiguration>
+
+ <ImageInstall>
+ <OSImage>
+ <InstallTo>
+ <PartitionID>2</PartitionID>
+ <DiskID>0</DiskID>
+ </InstallTo>
+ <InstallToAvailablePartition>false</InstallToAvailablePartition>
+ <WillShowUI>OnError</WillShowUI>
+ <InstallFrom>
+ <MetaData wcm:action="add">
+ <Key>/IMAGE/NAME</Key>
+ <Value>Windows Server 2019 SERVERSTANDARDCORE</Value>
+ </MetaData>
+ </InstallFrom>
+ </OSImage>
+ </ImageInstall>
+
+ <UserData>
+ <!-- Product Key from http://technet.microsoft.com/en-us/library/jj612867.aspx -->
+ <ProductKey>
+ <!-- Do not uncomment the Key element if you are using trial ISOs -->
+ <!-- You must uncomment the Key element (and optionally insert your own key) if you are using retail or volume license ISOs -->
+ <!-- <Key></Key> -->
+ <WillShowUI>OnError</WillShowUI>
+ </ProductKey>
+ <AcceptEula>true</AcceptEula>
+ </UserData>
+
+ </component>
+
+ <component name="Microsoft-Windows-PnpCustomizationsWinPE" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <DriverPaths>
+ <PathAndCredentials wcm:action="add" wcm:keyValue="1">
+ <Path>E:\NetKVM\2k19\amd64\</Path>
+ </PathAndCredentials>
+ <PathAndCredentials wcm:action="add" wcm:keyValue="2">
+ <Path>E:\viostor\2k19\amd64\</Path>
+ </PathAndCredentials>
+ <PathAndCredentials wcm:action="add" wcm:keyValue="3">
+ <Path>E:\vioserial\2k19\amd64\</Path>
+ </PathAndCredentials>
+ </DriverPaths>
+ </component>
+
+ </settings>
+
+ <settings pass="oobeSystem">
+ <component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <VisualEffects>
+ <FontSmoothing>ClearType</FontSmoothing>
+ </VisualEffects>
+
+ <UserAccounts>
+ <!--
+ Password to be used only during initial provisioning.
+ Must be reset with final Sysprep.
+ -->
+ <AdministratorPassword>
+ <Value>Passw0rd</Value>
+ <PlainText>true</PlainText>
+ </AdministratorPassword>
+ </UserAccounts>
+
+ <AutoLogon>
+ <Password>
+ <Value>Passw0rd</Value>
+ <PlainText>true</PlainText>
+ </Password>
+ <Enabled>true</Enabled>
+ <Username>Administrator</Username>
+ </AutoLogon>
+
+ <ComputerName>*</ComputerName>
+
+ <OOBE>
+ <NetworkLocation>Work</NetworkLocation>
+ <HideEULAPage>true</HideEULAPage>
+ <ProtectYourPC>3</ProtectYourPC>
+ <SkipMachineOOBE>true</SkipMachineOOBE>
+ <SkipUserOOBE>true</SkipUserOOBE>
+ </OOBE>
+
+ <FirstLogonCommands>
+
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell -NoLogo -NonInteractive -ExecutionPolicy RemoteSigned -File A:\first-logon.ps1</CommandLine>
+ <Order>1</Order>
+ </SynchronousCommand>
+
+ </FirstLogonCommands>
+
+ </component>
+
+ </settings>
+
+ <settings pass="specialize">
+
+ <component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <TimeZone>UTC</TimeZone>
+ <ComputerName>*</ComputerName>
+ </component>
+
+ </settings>
+
+</unattend>
diff --git a/qa/workunits/windows/libvirt_vm/first-logon.ps1 b/qa/workunits/windows/libvirt_vm/first-logon.ps1
new file mode 100644
index 000000000..654b836bb
--- /dev/null
+++ b/qa/workunits/windows/libvirt_vm/first-logon.ps1
@@ -0,0 +1,42 @@
+$ErrorActionPreference = "Stop"
+
+. "${PSScriptRoot}\utils.ps1"
+
+$VIRTIO_WIN_PATH = "E:\"
+
+# Install QEMU guest agent
+Write-Output "Installing QEMU guest agent"
+$p = Start-Process -FilePath "msiexec.exe" -ArgumentList @("/i", "${VIRTIO_WIN_PATH}\guest-agent\qemu-ga-x86_64.msi", "/qn") -NoNewWindow -PassThru -Wait
+if($p.ExitCode) {
+ Throw "The QEMU guest agent installation failed. Exit code: $($p.ExitCode)"
+}
+Write-Output "Successfully installed QEMU guest agent"
+
+# Install OpenSSH server
+Start-ExecuteWithRetry {
+ Get-WindowsCapability -Online -Name OpenSSH* | Add-WindowsCapability -Online
+}
+
+# Start OpenSSH server
+Set-Service -Name "sshd" -StartupType Automatic
+Start-Service -Name "sshd"
+
+# Set PowerShell as default SSH shell
+New-ItemProperty -PropertyType String -Force -Name DefaultShell -Path "HKLM:\SOFTWARE\OpenSSH" -Value (Get-Command powershell.exe).Source
+
+# Create SSH firewall rule
+New-NetFirewallRule -Name "sshd" -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22
+
+# Authorize the SSH key
+$authorizedKeysFile = Join-Path $env:ProgramData "ssh\administrators_authorized_keys"
+Set-Content -Path $authorizedKeysFile -Value (Get-Content "${PSScriptRoot}\id_rsa.pub") -Encoding ascii
+$acl = Get-Acl $authorizedKeysFile
+$acl.SetAccessRuleProtection($true, $false)
+$administratorsRule = New-Object system.security.accesscontrol.filesystemaccessrule("Administrators", "FullControl", "Allow")
+$systemRule = New-Object system.security.accesscontrol.filesystemaccessrule("SYSTEM", "FullControl", "Allow")
+$acl.SetAccessRule($administratorsRule)
+$acl.SetAccessRule($systemRule)
+$acl | Set-Acl
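+# (Win32-OpenSSH ignores administrators_authorized_keys unless its ACL is
+# restricted to Administrators and SYSTEM; that is why inheritance is disabled
+# above and only those two access rules are applied)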
+
+# Reboot the machine to complete first logon process
+Restart-Computer -Force -Confirm:$false
diff --git a/qa/workunits/windows/libvirt_vm/setup.ps1 b/qa/workunits/windows/libvirt_vm/setup.ps1
new file mode 100644
index 000000000..550fb274e
--- /dev/null
+++ b/qa/workunits/windows/libvirt_vm/setup.ps1
@@ -0,0 +1,43 @@
+$ErrorActionPreference = "Stop"
+$ProgressPreference = "SilentlyContinue"
+
+$PYTHON3_URL = "https://www.python.org/ftp/python/3.10.4/python-3.10.4-amd64.exe"
+$FIO_URL = "https://bsdio.com/fio/releases/fio-3.27-x64.msi"
+$VC_REDIST_URL = "https://aka.ms/vs/17/release/vc_redist.x64.exe"
+
+. "${PSScriptRoot}\utils.ps1"
+
+function Install-VCRedist {
+ Write-Output "Installing Visual Studio Redistributable x64"
+ Install-Tool -URL $VC_REDIST_URL -Params @("/quiet", "/norestart")
+ Write-Output "Successfully installed Visual Studio Redistributable x64"
+}
+
+function Install-Python3 {
+ Write-Output "Installing Python3"
+ Install-Tool -URL $PYTHON3_URL -Params @("/quiet", "InstallAllUsers=1", "PrependPath=1")
+ Add-ToPathEnvVar -Path @("${env:ProgramFiles}\Python310\", "${env:ProgramFiles}\Python310\Scripts\")
+ Write-Output "Installing pip dependencies"
+ Start-ExecuteWithRetry {
+ Invoke-CommandLine "pip3.exe" "install prettytable"
+ }
+ Write-Output "Successfully installed Python3"
+}
+
+function Install-FIO {
+ Write-Output "Installing FIO"
+ Install-Tool -URL $FIO_URL -Params @("/qn", "/l*v", "$env:TEMP\fio-install.log", "/norestart")
+ Write-Output "Successfully installed FIO"
+}
+
+Install-VCRedist
+Install-Python3
+Install-FIO
+
+# Add the WNBD and Ceph binary directories to PATH
+Add-ToPathEnvVar -Path @(
+ "${env:SystemDrive}\wnbd\binaries",
+ "${env:SystemDrive}\ceph")
+
+# This will refresh the PATH for new SSH sessions
+Restart-Service -Force -Name "sshd"
diff --git a/qa/workunits/windows/libvirt_vm/setup.sh b/qa/workunits/windows/libvirt_vm/setup.sh
new file mode 100755
index 000000000..51e91ec42
--- /dev/null
+++ b/qa/workunits/windows/libvirt_vm/setup.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+set -ex
+
+WINDOWS_SERVER_2019_ISO_URL=${WINDOWS_SERVER_2019_ISO_URL:-"https://software-download.microsoft.com/download/pr/17763.737.190906-2324.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us_1.iso"}
+VIRTIO_WIN_ISO_URL=${VIRTIO_WIN_ISO_URL:-"https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso"}
+
+DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
+
+# Use build_utils.sh from ceph-build
+curl --retry-max-time 30 --retry 10 -L -o ${DIR}/build_utils.sh https://raw.githubusercontent.com/ceph/ceph-build/main/scripts/build_utils.sh
+source ${DIR}/build_utils.sh
+
+# Helper function to restart the Windows VM
+function restart_windows_vm() {
+ echo "Restarting Windows VM"
+ ssh_exec "cmd.exe /c 'shutdown.exe /r /t 0 & sc.exe stop sshd'"
+ SECONDS=0
+ TIMEOUT=${1:-600}
+ while true; do
+ if [[ $SECONDS -gt $TIMEOUT ]]; then
+ echo "Timeout waiting for the VM to start"
+ exit 1
+ fi
+ ssh_exec hostname || {
+ echo "Cannot execute SSH commands yet"
+ sleep 10
+ continue
+ }
+ break
+ done
+ echo "Windows VM restarted"
+}
+
+# Install libvirt with KVM
+retrycmd_if_failure 5 0 5m sudo apt-get update
+retrycmd_if_failure 5 0 10m sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients virtinst
+
+# Download ISO images
+echo "Downloading virtio-win ISO"
+retrycmd_if_failure 5 0 30m curl -C - -L $VIRTIO_WIN_ISO_URL -o ${DIR}/virtio-win.iso
+echo "Downloading Windows Server 2019 ISO"
+retrycmd_if_failure 5 0 60m curl -C - -L $WINDOWS_SERVER_2019_ISO_URL -o ${DIR}/windows-server-2019.iso
+
+# Create virtual floppy image with the unattended instructions to install Windows Server 2019
+echo "Creating floppy image"
+qemu-img create -f raw ${DIR}/floppy.img 1440k
+mkfs.msdos -s 1 ${DIR}/floppy.img
+mkdir ${DIR}/floppy
+sudo mount ${DIR}/floppy.img ${DIR}/floppy
+ssh-keygen -b 2048 -t rsa -f ${DIR}/id_rsa -q -N ""
+sudo cp \
+ ${DIR}/autounattend.xml \
+ ${DIR}/first-logon.ps1 \
+ ${DIR}/id_rsa.pub \
+ ${DIR}/utils.ps1 \
+ ${DIR}/setup.ps1 \
+ ${DIR}/floppy/
+sudo umount ${DIR}/floppy
+rmdir ${DIR}/floppy
+
+echo "Starting libvirt VM"
+qemu-img create -f qcow2 ${DIR}/ceph-win-ltsc2019.qcow2 50G
+VM_NAME="ceph-win-ltsc2019"
+sudo virt-install \
+ --name $VM_NAME \
+ --os-variant win2k19 \
+ --boot hd,cdrom \
+ --virt-type kvm \
+ --graphics spice \
+ --cpu host \
+ --vcpus 4 \
+ --memory 4096 \
+ --disk ${DIR}/floppy.img,device=floppy \
+ --disk ${DIR}/ceph-win-ltsc2019.qcow2,bus=virtio \
+ --disk ${DIR}/windows-server-2019.iso,device=cdrom \
+ --disk ${DIR}/virtio-win.iso,device=cdrom \
+ --network network=default,model=virtio \
+ --controller type=virtio-serial \
+ --channel unix,target_type=virtio,name=org.qemu.guest_agent.0 \
+    --noautoconsole
+
+export SSH_USER="administrator"
+export SSH_KNOWN_HOSTS_FILE="${DIR}/known_hosts"
+export SSH_KEY="${DIR}/id_rsa"
+
+SECONDS=0
+TIMEOUT=1800
+SLEEP_SECS=30
+while true; do
+ if [[ $SECONDS -gt $TIMEOUT ]]; then
+ echo "Timeout waiting for the VM to start"
+ exit 1
+ fi
+ VM_IP=$(sudo virsh domifaddr --source agent --interface Ethernet --full $VM_NAME | grep ipv4 | awk '{print $4}' | cut -d '/' -f1) || {
+ echo "Retrying in $SLEEP_SECS seconds"
+ sleep $SLEEP_SECS
+ continue
+ }
+ ssh-keyscan -H $VM_IP &> $SSH_KNOWN_HOSTS_FILE || {
+ echo "SSH is not reachable yet"
+ sleep $SLEEP_SECS
+ continue
+ }
+ SSH_ADDRESS=$VM_IP ssh_exec hostname || {
+ echo "Cannot execute SSH commands yet"
+ sleep $SLEEP_SECS
+ continue
+ }
+ break
+done
+export SSH_ADDRESS=$VM_IP
+
+scp_upload ${DIR}/utils.ps1 /utils.ps1
+scp_upload ${DIR}/setup.ps1 /setup.ps1
+SSH_TIMEOUT=1h ssh_exec /setup.ps1
+
+cd $DIR
+
+# Get the helper script to download Chacra builds
+retrycmd_if_failure 10 5 1m curl -L -o ./get-chacra-bin.py https://raw.githubusercontent.com/ceph/ceph-win32-tests/main/get-bin.py
+chmod +x ./get-chacra-bin.py
+
+# Download latest WNBD build from Chacra
+retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project wnbd --filename wnbd.zip
+scp_upload wnbd.zip /wnbd.zip
+ssh_exec tar.exe xzvf /wnbd.zip -C /
+
+# Install WNBD driver
+ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\Root
+ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\TrustedPublisher
+ssh_exec /wnbd/binaries/wnbd-client.exe install-driver /wnbd/driver/wnbd.inf
+restart_windows_vm
+ssh_exec wnbd-client.exe -v
+
+# Download Ceph Windows build from Chacra
+CEPH_REPO_FILE="/etc/apt/sources.list.d/ceph.list"
+PROJECT=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $4}')
+BRANCH=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $5}')
+SHA1=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $6}')
+retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project $PROJECT --branchname $BRANCH --sha1 $SHA1 --filename ceph.zip
+
+# Install Ceph on Windows
+SSH_TIMEOUT=5m scp_upload ./ceph.zip /ceph.zip
+SSH_TIMEOUT=10m ssh_exec tar.exe xzvf /ceph.zip -C /
+ssh_exec "New-Service -Name ceph-rbd -BinaryPathName 'c:\ceph\rbd-wnbd.exe service'"
+ssh_exec Start-Service -Name ceph-rbd
+ssh_exec rbd.exe -v
+
+# Setup Ceph configs and directories
+ssh_exec mkdir -force /etc/ceph, /var/run/ceph, /var/log/ceph
+for i in $(ls /etc/ceph); do
+ scp_upload /etc/ceph/$i /etc/ceph/$i
+done
+
+cat << EOF > ${DIR}/connection_info.sh
+export SSH_USER="${SSH_USER}"
+export SSH_KNOWN_HOSTS_FILE="${SSH_KNOWN_HOSTS_FILE}"
+export SSH_KEY="${SSH_KEY}"
+export SSH_ADDRESS="${SSH_ADDRESS}"
+EOF
+
+echo "Windows Server 2019 libvirt testing VM is ready"
diff --git a/qa/workunits/windows/libvirt_vm/utils.ps1 b/qa/workunits/windows/libvirt_vm/utils.ps1
new file mode 100644
index 000000000..f29ab79f5
--- /dev/null
+++ b/qa/workunits/windows/libvirt_vm/utils.ps1
@@ -0,0 +1,130 @@
+function Invoke-CommandLine {
+ Param(
+ [Parameter(Mandatory=$true)]
+ [String]$Command,
+ [String]$Arguments,
+ [Int[]]$AllowedExitCodes=@(0)
+ )
+ & $Command $Arguments.Split(" ")
+ if($LASTEXITCODE -notin $AllowedExitCodes) {
+ Throw "$Command $Arguments returned a non zero exit code ${LASTEXITCODE}."
+ }
+}
+
+function Start-ExecuteWithRetry {
+ Param(
+ [Parameter(Mandatory=$true)]
+ [ScriptBlock]$ScriptBlock,
+ [Int]$MaxRetryCount=10,
+ [Int]$RetryInterval=3,
+ [String]$RetryMessage,
+ [Array]$ArgumentList=@()
+ )
+ $currentErrorActionPreference = $ErrorActionPreference
+ $ErrorActionPreference = "Continue"
+ $retryCount = 0
+ while ($true) {
+ try {
+ $res = Invoke-Command -ScriptBlock $ScriptBlock -ArgumentList $ArgumentList
+ $ErrorActionPreference = $currentErrorActionPreference
+ return $res
+ } catch [System.Exception] {
+ $retryCount++
+ if ($retryCount -gt $MaxRetryCount) {
+ $ErrorActionPreference = $currentErrorActionPreference
+ Throw $_
+ } else {
+ $prefixMsg = "Retry(${retryCount}/${MaxRetryCount})"
+ if($RetryMessage) {
+ Write-Host "${prefixMsg} - $RetryMessage"
+ } elseif($_) {
+ Write-Host "${prefixMsg} - $($_.ToString())"
+ }
+ Start-Sleep $RetryInterval
+ }
+ }
+ }
+}
+
+function Start-FileDownload {
+ Param(
+ [Parameter(Mandatory=$true)]
+ [String]$URL,
+ [Parameter(Mandatory=$true)]
+ [String]$Destination,
+ [Int]$RetryCount=10
+ )
+ Write-Output "Downloading $URL to $Destination"
+ Start-ExecuteWithRetry `
+ -ScriptBlock { Invoke-CommandLine -Command "curl.exe" -Arguments "-L -s -o $Destination $URL" } `
+ -MaxRetryCount $RetryCount `
+ -RetryMessage "Failed to download '${URL}'. Retrying"
+ Write-Output "Successfully downloaded."
+}
+
+function Add-ToPathEnvVar {
+ Param(
+ [Parameter(Mandatory=$true)]
+ [String[]]$Path,
+ [Parameter(Mandatory=$false)]
+ [ValidateSet([System.EnvironmentVariableTarget]::User, [System.EnvironmentVariableTarget]::Machine)]
+ [System.EnvironmentVariableTarget]$Target=[System.EnvironmentVariableTarget]::Machine
+ )
+ $pathEnvVar = [Environment]::GetEnvironmentVariable("PATH", $Target).Split(';')
+ $currentSessionPath = $env:PATH.Split(';')
+ foreach($p in $Path) {
+ if($p -notin $pathEnvVar) {
+ $pathEnvVar += $p
+ }
+ if($p -notin $currentSessionPath) {
+ $currentSessionPath += $p
+ }
+ }
+ $env:PATH = $currentSessionPath -join ';'
+ $newPathEnvVar = $pathEnvVar -join ';'
+ [Environment]::SetEnvironmentVariable("PATH", $newPathEnvVar, $Target)
+}
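+# Example usage (illustrative only; the path is hypothetical):
+#   Add-ToPathEnvVar -Path @("C:\tools")
+# This persists the entry at the machine scope and also patches the current
+# session's PATH so new entries are usable immediately.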
+
+function Install-Tool {
+ [CmdletBinding(DefaultParameterSetName = "URL")]
+ Param(
+ [Parameter(Mandatory=$true, ParameterSetName = "URL")]
+ [String]$URL,
+ [Parameter(Mandatory=$true, ParameterSetName = "LocalPath")]
+ [String]$LocalPath,
+ [Parameter(ParameterSetName = "URL")]
+ [Parameter(ParameterSetName = "LocalPath")]
+ [String[]]$Params=@(),
+ [Parameter(ParameterSetName = "URL")]
+ [Parameter(ParameterSetName = "LocalPath")]
+ [Int[]]$AllowedExitCodes=@(0)
+ )
+ PROCESS {
+ $installerPath = $LocalPath
+ if($PSCmdlet.ParameterSetName -eq "URL") {
+ $installerPath = Join-Path $env:TEMP $URL.Split('/')[-1]
+ Start-FileDownload -URL $URL -Destination $installerPath
+ }
+ Write-Output "Installing ${installerPath}"
+ $kwargs = @{
+ "FilePath" = $installerPath
+ "ArgumentList" = $Params
+ "NoNewWindow" = $true
+ "PassThru" = $true
+ "Wait" = $true
+ }
+ if((Get-ChildItem $installerPath).Extension -eq '.msi') {
+ $kwargs["FilePath"] = "msiexec.exe"
+ $kwargs["ArgumentList"] = @("/i", $installerPath) + $Params
+ }
+ $p = Start-Process @kwargs
+ if($p.ExitCode -notin $AllowedExitCodes) {
+ Throw "Installation failed. Exit code: $($p.ExitCode)"
+ }
+ if($PSCmdlet.ParameterSetName -eq "URL") {
+ Start-ExecuteWithRetry `
+ -ScriptBlock { Remove-Item -Force -Path $installerPath -ErrorAction Stop } `
+ -RetryMessage "Failed to remove ${installerPath}. Retrying"
+ }
+ }
+}
diff --git a/qa/workunits/windows/run-tests.ps1 b/qa/workunits/windows/run-tests.ps1
new file mode 100644
index 000000000..6d818f426
--- /dev/null
+++ b/qa/workunits/windows/run-tests.ps1
@@ -0,0 +1,29 @@
+$ProgressPreference = "SilentlyContinue"
+$ErrorActionPreference = "Stop"
+
+$scriptLocation = [System.IO.Path]::GetDirectoryName(
+ $myInvocation.MyCommand.Definition)
+
+$testRbdWnbd = "$scriptLocation/test_rbd_wnbd.py"
+
+function safe_exec() {
+ # Powershell doesn't check the command exit code, we'll need to
+ # do it ourselves. Also, in case of native commands, it treats stderr
+ # output as an exception, which is why we'll have to capture it.
+ cmd /c "$args 2>&1"
+ if ($LASTEXITCODE) {
+ throw "Command failed: $args"
+ }
+}
+
+safe_exec python.exe $testRbdWnbd --test-name RbdTest --iterations 100
+safe_exec python.exe $testRbdWnbd --test-name RbdFioTest --iterations 100
+safe_exec python.exe $testRbdWnbd --test-name RbdStampTest --iterations 100
+
+# It can take a while to set up the partition (~10s), so we use fewer iterations.
+safe_exec python.exe $testRbdWnbd --test-name RbdFsTest --iterations 4
+safe_exec python.exe $testRbdWnbd --test-name RbdFsFioTest --iterations 4
+safe_exec python.exe $testRbdWnbd --test-name RbdFsStampTest --iterations 4
+
+safe_exec python.exe $testRbdWnbd `
+ --test-name RbdResizeFioTest --image-size-mb 64
diff --git a/qa/workunits/windows/run-tests.sh b/qa/workunits/windows/run-tests.sh
new file mode 100644
index 000000000..b582491c5
--- /dev/null
+++ b/qa/workunits/windows/run-tests.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+set -ex
+
+DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
+
+source ${DIR}/libvirt_vm/build_utils.sh
+source ${DIR}/libvirt_vm/connection_info.sh
+
+# Run the Windows tests
+scp_upload ${DIR} /windows-workunits
+SSH_TIMEOUT=30m ssh_exec powershell.exe -File /windows-workunits/run-tests.ps1
diff --git a/qa/workunits/windows/test_rbd_wnbd.py b/qa/workunits/windows/test_rbd_wnbd.py
new file mode 100644
index 000000000..f22810e2e
--- /dev/null
+++ b/qa/workunits/windows/test_rbd_wnbd.py
@@ -0,0 +1,919 @@
+import argparse
+import collections
+import functools
+import json
+import logging
+import math
+import os
+import prettytable
+import random
+import subprocess
+import time
+import threading
+import typing
+import uuid
+from concurrent import futures
+
+LOG = logging.getLogger()
+
+parser = argparse.ArgumentParser(description='rbd-wnbd tests')
+parser.add_argument('--test-name',
+ help='The test to be run.',
+ default="RbdFioTest")
+parser.add_argument('--iterations',
+ help='Total number of test iterations',
+ default=1, type=int)
+parser.add_argument('--concurrency',
+ help='The number of tests to run in parallel',
+ default=4, type=int)
+parser.add_argument('--fio-iterations',
+ help='Total number of benchmark iterations per disk.',
+ default=1, type=int)
+parser.add_argument('--fio-workers',
+ help='Total number of fio workers per disk.',
+ default=1, type=int)
+parser.add_argument('--fio-depth',
+ help='The number of concurrent asynchronous operations '
+ 'executed per disk',
+ default=64, type=int)
+parser.add_argument('--fio-verify',
+ help='The mechanism used to validate the written '
+ 'data. Examples: crc32c, md5, sha1, null, etc. '
+ 'If set to null, the written data will not be '
+ 'verified.',
+ default='crc32c')
+parser.add_argument('--bs',
+ help='Benchmark block size.',
+ default="2M")
+parser.add_argument('--op',
+ help='Benchmark operation. '
+ 'Examples: read, randwrite, rw, etc.',
+ default="rw")
+parser.add_argument('--image-prefix',
+ help='The image name prefix.',
+ default="cephTest-")
+parser.add_argument('--image-size-mb',
+ help='The image size in megabytes.',
+ default=1024, type=int)
+parser.add_argument('--map-timeout',
+ help='Image map timeout.',
+ default=60, type=int)
+parser.add_argument('--skip-enabling-disk', action='store_true',
+ help='If set, the disk will not be turned online and the '
+ 'read-only flag will not be removed. Useful when '
+ 'the SAN policy is set to "onlineAll".')
+parser.add_argument('--verbose', action='store_true',
+ help='Print info messages.')
+parser.add_argument('--debug', action='store_true',
+ help='Print debug messages.')
+parser.add_argument('--stop-on-error', action='store_true',
+ help='Stop testing when hitting errors.')
+parser.add_argument('--skip-cleanup-on-error', action='store_true',
+ help='Skip cleanup when hitting errors.')
+
+
+class CephTestException(Exception):
+ msg_fmt = "An exception has been encountered."
+
+    def __init__(self, message: typing.Optional[str] = None, **kwargs):
+ self.kwargs = kwargs
+ if not message:
+ message = self.msg_fmt % kwargs
+ self.message = message
+ super(CephTestException, self).__init__(message)
+
+
+class CommandFailed(CephTestException):
+ msg_fmt = (
+ "Command failed: %(command)s. "
+ "Return code: %(returncode)s. "
+ "Stdout: %(stdout)s. Stderr: %(stderr)s.")
+
+
+class CephTestTimeout(CephTestException):
+ msg_fmt = "Operation timeout."
+
+
+def setup_logging(log_level: int = logging.INFO):
+ handler = logging.StreamHandler()
+ handler.setLevel(log_level)
+
+ log_fmt = '[%(asctime)s] %(levelname)s - %(message)s'
+ formatter = logging.Formatter(log_fmt)
+ handler.setFormatter(formatter)
+
+ LOG.addHandler(handler)
+ LOG.setLevel(logging.DEBUG)
+
+
+def retry_decorator(timeout: int = 60,
+ retry_interval: int = 2,
+ silent_interval: int = 10,
+ additional_details: str = "",
+ retried_exceptions:
+ typing.Union[
+ typing.Type[Exception],
+ collections.abc.Iterable[
+ typing.Type[Exception]]] = Exception):
+ def wrapper(f: typing.Callable[..., typing.Any]):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ tstart: float = time.time()
+ elapsed: float = 0
+ exc = None
+ details = additional_details or "%s failed" % f.__qualname__
+
+ while elapsed < timeout or not timeout:
+ try:
+ return f(*args, **kwargs)
+ except retried_exceptions as ex:
+ exc = ex
+ elapsed = time.time() - tstart
+ if elapsed > silent_interval:
+ level = logging.WARNING
+ else:
+ level = logging.DEBUG
+ LOG.log(level,
+ "Exception: %s. Additional details: %s. "
+ "Time elapsed: %d. Timeout: %d",
+ ex, details, elapsed, timeout)
+
+ time.sleep(retry_interval)
+ elapsed = time.time() - tstart
+
+ msg = (
+ "Operation timed out. Exception: %s. Additional details: %s. "
+ "Time elapsed: %d. Timeout: %d.")
+ raise CephTestTimeout(
+ msg % (exc, details, elapsed, timeout))
+ return inner
+ return wrapper
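+# Illustrative use of retry_decorator (sketch only; probe_health is a
+# hypothetical function, not part of this suite): retry for up to 30s at 5s
+# intervals, retrying only on CephTestException.
+#
+#   @retry_decorator(timeout=30, retry_interval=5,
+#                    retried_exceptions=CephTestException)
+#   def probe_health():
+#       ...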
+
+
+def execute(*args, **kwargs):
+ LOG.debug("Executing: %s", args)
+ result = subprocess.run(
+ args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **kwargs)
+ LOG.debug("Command %s returned %d.", args, result.returncode)
+ if result.returncode:
+ exc = CommandFailed(
+ command=args, returncode=result.returncode,
+ stdout=result.stdout, stderr=result.stderr)
+ LOG.error(exc)
+ raise exc
+ return result
+
+
+def ps_execute(*args, **kwargs):
+ # Disable PS progress bar, causes issues when invoked remotely.
+ prefix = "$global:ProgressPreference = 'SilentlyContinue' ; "
+ return execute(
+ "powershell.exe", "-NonInteractive",
+ "-Command", prefix, *args, **kwargs)
+
+
+def array_stats(array: list):
+ mean = sum(array) / len(array) if len(array) else 0
+ variance = (sum((i - mean) ** 2 for i in array) / len(array)
+ if len(array) else 0)
+ std_dev = math.sqrt(variance)
+ sorted_array = sorted(array)
+
+ return {
+ 'min': min(array) if len(array) else 0,
+ 'max': max(array) if len(array) else 0,
+ 'sum': sum(array) if len(array) else 0,
+ 'mean': mean,
+ 'median': sorted_array[len(array) // 2] if len(array) else 0,
+ 'max_90': sorted_array[int(len(array) * 0.9)] if len(array) else 0,
+ 'min_90': sorted_array[int(len(array) * 0.1)] if len(array) else 0,
+ 'variance': variance,
+ 'std_dev': std_dev,
+ 'count': len(array)
+ }
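+# (sketch: array_stats([1, 2, 3]) gives mean 2.0 and median 2, i.e. the
+# element at index len//2 of the sorted copy; max_90/min_90 index the sorted
+# copy at int(len * 0.9) and int(len * 0.1))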
+
+
+class Tracer:
+ data: collections.OrderedDict = collections.OrderedDict()
+ lock = threading.Lock()
+
+ @classmethod
+ def trace(cls, func):
+ def wrapper(*args, **kwargs):
+ tstart = time.time()
+ exc_str = None
+
+ # Preserve call order
+ with cls.lock:
+ if func.__qualname__ not in cls.data:
+ cls.data[func.__qualname__] = list()
+
+ try:
+ return func(*args, **kwargs)
+ except Exception as exc:
+ exc_str = str(exc)
+ raise
+ finally:
+ tend = time.time()
+
+ with cls.lock:
+ cls.data[func.__qualname__] += [{
+ "duration": tend - tstart,
+ "error": exc_str,
+ }]
+
+ return wrapper
+
+ @classmethod
+ def get_results(cls):
+ stats = collections.OrderedDict()
+ for f in cls.data.keys():
+ stats[f] = array_stats([i['duration'] for i in cls.data[f]])
+ errors = []
+ for i in cls.data[f]:
+ if i['error']:
+ errors.append(i['error'])
+
+ stats[f]['errors'] = errors
+ return stats
+
+ @classmethod
+ def print_results(cls):
+ r = cls.get_results()
+
+ table = prettytable.PrettyTable(title="Duration (s)")
+ table.field_names = [
+ "function", "min", "max", "total",
+ "mean", "median", "std_dev",
+ "max 90%", "min 90%", "count", "errors"]
+ table.float_format = ".4"
+ for f, s in r.items():
+ table.add_row([f, s['min'], s['max'], s['sum'],
+ s['mean'], s['median'], s['std_dev'],
+ s['max_90'], s['min_90'],
+ s['count'], len(s['errors'])])
+ print(table)
+
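+# Tracer usage sketch (names illustrative): decorate the callables of
+# interest, then dump the aggregated timings at the end of the run.
+#
+#   @Tracer.trace
+#   def map_image():
+#       ...
+#
+#   map_image()
+#   Tracer.print_results()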
+
+class RbdImage(object):
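+ """Thin wrapper around an RBD image mapped on Windows via rbd-wnbd."""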
+ def __init__(self,
+ name: str,
+ size_mb: int,
+ is_shared: bool = True,
+ disk_number: int = -1,
+ mapped: bool = False):
+ self.name = name
+ self.size_mb = size_mb
+ self.is_shared = is_shared
+ self.disk_number = disk_number
+ self.mapped = mapped
+ self.removed = False
+ self.drive_letter = ""
+
+ @classmethod
+ @Tracer.trace
+ def create(cls,
+ name: str,
+ size_mb: int = 1024,
+ is_shared: bool = True):
+ LOG.info("Creating image: %s. Size: %s.", name, "%sM" % size_mb)
+ cmd = ["rbd", "create", name, "--size", "%sM" % size_mb]
+ if is_shared:
+ cmd += ["--image-shared"]
+ execute(*cmd)
+
+ return RbdImage(name, size_mb, is_shared)
+
+ @Tracer.trace
+ def get_disk_number(self,
+ timeout: int = 60,
+ retry_interval: int = 2):
+ @retry_decorator(
+ retried_exceptions=CephTestException,
+ timeout=timeout,
+ retry_interval=retry_interval)
+ def _get_disk_number():
+ LOG.info("Retrieving disk number: %s", self.name)
+
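+ # "rbd-wnbd show <image> --format=json" returns a JSON object
+ # that includes a "disk_number" field; a non-positive value
+ # means that the disk hasn't been attached yet, so we retry.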
+ result = execute("rbd-wnbd", "show", self.name, "--format=json")
+ disk_info = json.loads(result.stdout)
+ disk_number = disk_info["disk_number"]
+ if disk_number > 0:
+ LOG.debug("Image %s disk number: %d", self.name, disk_number)
+ return disk_number
+
+ raise CephTestException(
+ f"Could not get disk number: {self.name}.")
+
+ return _get_disk_number()
+
+ @Tracer.trace
+ def _wait_for_disk(self,
+ timeout: int = 60,
+ retry_interval: int = 2):
+ @retry_decorator(
+ retried_exceptions=(FileNotFoundError, OSError),
+ additional_details="the mapped disk isn't available yet",
+ timeout=timeout,
+ retry_interval=retry_interval)
+ def wait_for_disk():
+ LOG.debug("Waiting for disk to be accessible: %s %s",
+ self.name, self.path)
+
+ with open(self.path, 'rb'):
+ pass
+
+ return wait_for_disk()
+
+ @property
+ def path(self):
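+ # Win32 device namespace path granting raw access to the whole
+ # disk, e.g. \\.\PhysicalDrive2.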
+ return f"\\\\.\\PhysicalDrive{self.disk_number}"
+
+ @Tracer.trace
+ @retry_decorator(additional_details="couldn't clear disk read-only flag")
+ def set_writable(self):
+ ps_execute(
+ "Set-Disk", "-Number", str(self.disk_number),
+ "-IsReadOnly", "$false")
+
+ @Tracer.trace
+ @retry_decorator(additional_details="couldn't bring the disk online")
+ def set_online(self):
+ ps_execute(
+ "Set-Disk", "-Number", str(self.disk_number),
+ "-IsOffline", "$false")
+
+ @Tracer.trace
+ def map(self, timeout: int = 60):
+ LOG.info("Mapping image: %s", self.name)
+ tstart = time.time()
+
+ execute("rbd-wnbd", "map", self.name)
+ self.mapped = True
+
+ self.disk_number = self.get_disk_number(timeout=timeout)
+
+ elapsed = time.time() - tstart
+ self._wait_for_disk(timeout=timeout - elapsed)
+
+ @Tracer.trace
+ def unmap(self):
+ if self.mapped:
+ LOG.info("Unmapping image: %s", self.name)
+ execute("rbd-wnbd", "unmap", self.name)
+ self.mapped = False
+
+ @Tracer.trace
+ def remove(self):
+ if not self.removed:
+ LOG.info("Removing image: %s", self.name)
+ execute("rbd", "rm", self.name)
+ self.removed = True
+
+ def cleanup(self):
+ try:
+ self.unmap()
+ finally:
+ self.remove()
+
+ @Tracer.trace
+ @retry_decorator()
+ def _init_disk(self):
+ cmd = f"Get-Disk -Number {self.disk_number} | Initialize-Disk"
+ ps_execute(cmd)
+
+ @Tracer.trace
+ @retry_decorator()
+ def _create_partition(self):
+ cmd = (f"Get-Disk -Number {self.disk_number} | "
+ "New-Partition -AssignDriveLetter -UseMaximumSize")
+ ps_execute(cmd)
+
+ @Tracer.trace
+ @retry_decorator()
+ def _format_volume(self):
+ cmd = (
+ f"(Get-Partition -DiskNumber {self.disk_number}"
+ " | ? { $_.DriveLetter }) | Format-Volume -Force -Confirm:$false")
+ ps_execute(cmd)
+
+ @Tracer.trace
+ @retry_decorator()
+ def _get_drive_letter(self):
+ cmd = (f"(Get-Partition -DiskNumber {self.disk_number}"
+ " | ? { $_.DriveLetter }).DriveLetter")
+ result = ps_execute(cmd)
+
+ # The PowerShell command emits a null character when no drive
+ # letter is assigned; for example, we may receive "\x00\r\n".
+ self.drive_letter = result.stdout.decode().strip()
+ if not self.drive_letter.isalpha() or len(self.drive_letter) != 1:
+ raise CephTestException(
+ "Invalid drive letter received: %s" % self.drive_letter)
+
+ @Tracer.trace
+ def init_fs(self):
+ if not self.mapped:
+ raise CephTestException("Unable to create fs, image not mapped.")
+
+ LOG.info("Initializing fs, image: %s.", self.name)
+
+ self._init_disk()
+ self._create_partition()
+ self._format_volume()
+ self._get_drive_letter()
+
+ @Tracer.trace
+ def get_fs_capacity(self):
+ if not self.drive_letter:
+ raise CephTestException("No drive letter available")
+
+ cmd = f"(Get-Volume -DriveLetter {self.drive_letter}).Size"
+ result = ps_execute(cmd)
+
+ return int(result.stdout.decode().strip())
+
+ @Tracer.trace
+ def resize(self, new_size_mb, allow_shrink=False):
+ LOG.info(
+ "Resizing image: %s. New size: %s MB, old size: %s MB",
+ self.name, new_size_mb, self.size_mb)
+
+ cmd = ["rbd", "resize", self.name,
+ "--size", f"{new_size_mb}M", "--no-progress"]
+ if allow_shrink:
+ cmd.append("--allow-shrink")
+
+ execute(*cmd)
+
+ self.size_mb = new_size_mb
+
+ @Tracer.trace
+ def get_disk_size(self):
+ """Retrieve the virtual disk size (bytes) reported by Windows."""
+ cmd = f"(Get-Disk -Number {self.disk_number}).Size"
+ result = ps_execute(cmd)
+
+ disk_size = result.stdout.decode().strip()
+ if not disk_size.isdigit():
+ raise CephTestException(
+ "Invalid disk size received: %s" % disk_size)
+
+ return int(disk_size)
+
+ @Tracer.trace
+ @retry_decorator(timeout=30)
+ def wait_for_disk_resize(self):
+ # After resizing the rbd image, the daemon is expected to receive
+ # the notification, inform the WNBD driver and then trigger a disk
+ # rescan (IOCTL_DISK_UPDATE_PROPERTIES). This might take a few seconds,
+ # so we'll need to do some polling.
+ disk_size = self.get_disk_size()
+ disk_size_mb = disk_size // (1 << 20)
+
+ if disk_size_mb != self.size_mb:
+ raise CephTestException(
+ "The disk size hasn't been updated yet. Retrieved size: "
+ f"{disk_size_mb}MB. Expected size: {self.size_mb}MB.")
+
+
+class RbdTest(object):
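+ """Base test: creates and maps an RBD image; subclasses override run()."""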
+ image: RbdImage
+
+ requires_disk_online = False
+ requires_disk_write = False
+
+ def __init__(self,
+ image_prefix: str = "cephTest-",
+ image_size_mb: int = 1024,
+ map_timeout: int = 60,
+ **kwargs):
+ self.image_size_mb = image_size_mb
+ self.image_name = image_prefix + str(uuid.uuid4())
+ self.map_timeout = map_timeout
+ self.skip_enabling_disk = kwargs.get("skip_enabling_disk")
+
+ @Tracer.trace
+ def initialize(self):
+ self.image = RbdImage.create(
+ self.image_name,
+ self.image_size_mb)
+ self.image.map(timeout=self.map_timeout)
+
+ if not self.skip_enabling_disk:
+ if self.requires_disk_write:
+ self.image.set_writable()
+
+ if self.requires_disk_online:
+ self.image.set_online()
+
+ def run(self):
+ pass
+
+ def cleanup(self):
+ if self.image:
+ self.image.cleanup()
+
+ @classmethod
+ def print_results(cls,
+ title: str = "Test results",
+ description: typing.Optional[str] = None):
+ pass
+
+
+class RbdFsTestMixin(object):
+ # Windows disks must be turned online before accessing partitions.
+ requires_disk_online = True
+ requires_disk_write = True
+
+ @Tracer.trace
+ def initialize(self):
+ super(RbdFsTestMixin, self).initialize()
+
+ self.image.init_fs()
+
+ def get_subpath(self, *args):
+ drive_path = f"{self.image.drive_letter}:\\"
+ return os.path.join(drive_path, *args)
+
+
+class RbdFsTest(RbdFsTestMixin, RbdTest):
+ pass
+
+
+class RbdFioTest(RbdTest):
+ data: typing.DefaultDict[str, typing.List[typing.Dict[str, str]]] = (
+ collections.defaultdict(list))
+ lock = threading.Lock()
+
+ def __init__(self,
+ *args,
+ fio_size_mb: typing.Optional[int] = None,
+ iterations: int = 1,
+ workers: int = 1,
+ bs: str = "2M",
+ iodepth: int = 64,
+ op: str = "rw",
+ verify: str = "crc32c",
+ **kwargs):
+
+ super(RbdFioTest, self).__init__(*args, **kwargs)
+
+ self.fio_size_mb = fio_size_mb or self.image_size_mb
+ self.iterations = iterations
+ self.workers = workers
+ self.bs = bs
+ self.iodepth = iodepth
+ self.op = op
+ if op not in ("read", "randread"):
+ self.requires_disk_write = True
+ self.verify = verify
+
+ def process_result(self, raw_fio_output: str):
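+ # fio's JSON output contains a "jobs" list; each job exposes
+ # per-op ("read"/"write") dicts with io_bytes, bw_bytes,
+ # runtime (ms), total_ios, short_ios, drop_ios and clat_ns
+ # statistics, which we aggregate below.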
+ result = json.loads(raw_fio_output)
+ with self.lock:
+ for job in result["jobs"]:
+ # Fio doesn't support trim on Windows
+ for op in ['read', 'write']:
+ if op in job:
+ self.data[op].append({
+ 'error': job['error'],
+ 'io_bytes': job[op]['io_bytes'],
+ 'bw_bytes': job[op]['bw_bytes'],
+ 'runtime': job[op]['runtime'] / 1000, # seconds
+ 'total_ios': job[op]['total_ios'],
+ 'short_ios': job[op]['short_ios'],
+ 'dropped_ios': job[op]['drop_ios'],
+ 'clat_ns_min': job[op]['clat_ns']['min'],
+ 'clat_ns_max': job[op]['clat_ns']['max'],
+ 'clat_ns_mean': job[op]['clat_ns']['mean'],
+ 'clat_ns_stddev': job[op]['clat_ns']['stddev'],
+ 'clat_ns_10': job[op].get('clat_ns', {})
+ .get('percentile', {})
+ .get('10.000000', 0),
+ 'clat_ns_90': job[op].get('clat_ns', {})
+ .get('percentile', {})
+ .get('90.000000', 0)
+ })
+
+ def _get_fio_path(self):
+ return self.image.path
+
+ @Tracer.trace
+ def _run_fio(self, fio_size_mb=None):
+ LOG.info("Starting FIO test.")
+ cmd = [
+ "fio", "--thread", "--output-format=json",
+ "--randrepeat=%d" % self.iterations,
+ "--direct=1", "--name=test",
+ "--bs=%s" % self.bs, "--iodepth=%s" % self.iodepth,
+ "--size=%sM" % (fio_size_mb or self.fio_size_mb),
+ "--readwrite=%s" % self.op,
+ "--numjobs=%s" % self.workers,
+ "--filename=%s" % self._get_fio_path(),
+ ]
+ if self.verify:
+ cmd += ["--verify=%s" % self.verify]
+ result = execute(*cmd)
+ LOG.info("Completed FIO test.")
+ self.process_result(result.stdout)
+
+ @Tracer.trace
+ def run(self):
+ self._run_fio()
+
+ @classmethod
+ def print_results(cls,
+ title: str = "Benchmark results",
+ description: typing.Optional[str] = None):
+ if description:
+ title = "%s (%s)" % (title, description)
+
+ for op in cls.data.keys():
+ op_title = "%s op=%s" % (title, op)
+
+ table = prettytable.PrettyTable(title=op_title)
+ table.field_names = ["stat", "min", "max", "mean",
+ "median", "std_dev",
+ "max 90%", "min 90%", "total"]
+ table.float_format = ".4"
+
+ op_data = cls.data[op]
+
+ s = array_stats([float(i["bw_bytes"]) / 1000_000 for i in op_data])
+ table.add_row(["bandwidth (MB/s)",
+ s['min'], s['max'], s['mean'],
+ s['median'], s['std_dev'],
+ s['max_90'], s['min_90'], 'N/A'])
+
+ s = array_stats([float(i["runtime"]) for i in op_data])
+ table.add_row(["duration (s)",
+ s['min'], s['max'], s['mean'],
+ s['median'], s['std_dev'],
+ s['max_90'], s['min_90'], s['sum']])
+
+ s = array_stats([i["error"] for i in op_data])
+ table.add_row(["errors",
+ s['min'], s['max'], s['mean'],
+ s['median'], s['std_dev'],
+ s['max_90'], s['min_90'], s['sum']])
+
+ s = array_stats([i["short_ios"] for i in op_data])
+ table.add_row(["incomplete IOs",
+ s['min'], s['max'], s['mean'],
+ s['median'], s['std_dev'],
+ s['max_90'], s['min_90'], s['sum']])
+
+ s = array_stats([i["dropped_ios"] for i in op_data])
+ table.add_row(["dropped IOs",
+ s['min'], s['max'], s['mean'],
+ s['median'], s['std_dev'],
+ s['max_90'], s['min_90'], s['sum']])
+
+ clat_min = array_stats([i["clat_ns_min"] for i in op_data])
+ clat_max = array_stats([i["clat_ns_max"] for i in op_data])
+ clat_mean = array_stats([i["clat_ns_mean"] for i in op_data])
+ clat_stddev = math.sqrt(
+ sum([float(i["clat_ns_stddev"]) ** 2 for i in op_data]) / len(op_data)
+ if len(op_data) else 0)
+ clat_10 = array_stats([i["clat_ns_10"] for i in op_data])
+ clat_90 = array_stats([i["clat_ns_90"] for i in op_data])
+ # For convenience, we'll convert it from ns to seconds.
+ table.add_row(["completion latency (s)",
+ clat_min['min'] / 1e+9,
+ clat_max['max'] / 1e+9,
+ clat_mean['mean'] / 1e+9,
+ clat_mean['median'] / 1e+9,
+ clat_stddev / 1e+9,
+ clat_10['mean'] / 1e+9,
+ clat_90['mean'] / 1e+9,
+ clat_mean['sum'] / 1e+9])
+ print(table)
+
+
+class RbdResizeFioTest(RbdFioTest):
+ """Image resize test.
+
+ This test extends and then shrinks the image, performing FIO tests to
+ validate the resized image.
+ """
+
+ @Tracer.trace
+ def run(self):
+ self.image.resize(self.image_size_mb * 2)
+ self.image.wait_for_disk_resize()
+
+ self._run_fio(fio_size_mb=self.image_size_mb * 2)
+
+ self.image.resize(self.image_size_mb // 2, allow_shrink=True)
+ self.image.wait_for_disk_resize()
+
+ self._run_fio(fio_size_mb=self.image_size_mb // 2)
+
+ # Just like rbd-nbd, rbd-wnbd is masking out-of-bounds errors.
+ # For this reason, we don't have a negative test that writes
+ # past the disk boundary.
+
+
+class RbdFsFioTest(RbdFsTestMixin, RbdFioTest):
+ def initialize(self):
+ super(RbdFsFioTest, self).initialize()
+
+ if not self.fio_size_mb or self.fio_size_mb == self.image_size_mb:
+ # Out of caution, we'll use up to 80% of the FS by default
+ self.fio_size_mb = int(
+ self.image.get_fs_capacity() * 0.8 / (1024 * 1024))
+
+ @staticmethod
+ def _fio_escape_path(path):
+ # FIO allows specifying multiple files separated by colon.
+ # This means that ":" has to be escaped, so
+ # F:\filename becomes F\:\filename.
+ return path.replace(":", "\\:")
+
+ def _get_fio_path(self):
+ return self._fio_escape_path(self.get_subpath("test-fio"))
+
+
+class RbdStampTest(RbdTest):
+ requires_disk_write = True
+
+ _write_open_mode = "rb+"
+ _read_open_mode = "rb"
+ _expect_path_exists = True
+
+ @staticmethod
+ def _rand_float(min_val: float, max_val: float):
+ return min_val + (random.random() * (max_val - min_val))
+
+ def _get_stamp(self):
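+ # The stamp is the image name, NUL-padded to 512 bytes (one
+ # logical sector on most disks), so a single sector I/O covers it.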
+ buff = self.image_name.encode()
+ padding = 512 - len(buff)
+ buff += b'\0' * padding
+ return buff
+
+ def _get_stamp_path(self):
+ return self.image.path
+
+ @Tracer.trace
+ def _write_stamp(self):
+ with open(self._get_stamp_path(), self._write_open_mode) as disk:
+ stamp = self._get_stamp()
+ disk.write(stamp)
+
+ @Tracer.trace
+ def _read_stamp(self):
+ with open(self._get_stamp_path(), self._read_open_mode) as disk:
+ return disk.read(len(self._get_stamp()))
+
+ @Tracer.trace
+ def run(self):
+ if self._expect_path_exists:
+ # Wait up to 5 seconds and then check the disk, ensuring that
+ # nobody else wrote to it. This is particularly useful when
+ # running a high number of tests in parallel, ensuring that
+ # we aren't writing to the wrong disk.
+ time.sleep(self._rand_float(0, 5))
+
+ stamp = self._read_stamp()
+ assert stamp == b'\0' * len(self._get_stamp())
+
+ self._write_stamp()
+
+ stamp = self._read_stamp()
+ assert stamp == self._get_stamp()
+
+
+class RbdFsStampTest(RbdFsTestMixin, RbdStampTest):
+ _write_open_mode = "wb"
+ _expect_path_exists = False
+
+ def _get_stamp_path(self):
+ return self.get_subpath("test-stamp")
+
+
+class TestRunner(object):
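+ """Run a given test class concurrently, across multiple iterations."""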
+ def __init__(self,
+ test_cls: typing.Type[RbdTest],
+ test_params: dict = {},
+ iterations: int = 1,
+ workers: int = 1,
+ stop_on_error: bool = False,
+ cleanup_on_error: bool = True):
+ self.test_cls = test_cls
+ self.test_params = test_params
+ self.iterations = iterations
+ self.workers = workers
+ self.executor = futures.ThreadPoolExecutor(max_workers=workers)
+ self.lock = threading.Lock()
+ self.completed = 0
+ self.errors = 0
+ self.stopped = False
+ self.stop_on_error = stop_on_error
+ self.cleanup_on_error = cleanup_on_error
+
+ @Tracer.trace
+ def run(self):
+ tasks = []
+ for i in range(self.iterations):
+ task = self.executor.submit(self.run_single_test)
+ tasks.append(task)
+
+ LOG.info("Waiting for %d tests to complete.", self.iterations)
+ for task in tasks:
+ task.result()
+
+ def run_single_test(self):
+ test = None
+ failed = False
+ if self.stopped:
+ return
+
+ try:
+ test = self.test_cls(**self.test_params)
+ test.initialize()
+ test.run()
+ except KeyboardInterrupt:
+ LOG.warning("Received Ctrl-C.")
+ self.stopped = True
+ except Exception as ex:
+ failed = True
+ if self.stop_on_error:
+ self.stopped = True
+ with self.lock:
+ self.errors += 1
+ LOG.exception(
+ "Test exception: %s. Total exceptions: %d",
+ ex, self.errors)
+ finally:
+ if test and (not failed or self.cleanup_on_error):
+ try:
+ test.cleanup()
+ except KeyboardInterrupt:
+ LOG.warning("Received Ctrl-C.")
+ self.stopped = True
+ # Retry the cleanup
+ test.cleanup()
+ except Exception:
+ LOG.exception("Test cleanup failed.")
+
+ with self.lock:
+ self.completed += 1
+ LOG.info("Completed tests: %d. Pending: %d",
+ self.completed, self.iterations - self.completed)
+
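+# A minimal sketch of driving the runner programmatically (values
+# illustrative); the CLI entry point below does the same via argparse:
+#
+#   runner = TestRunner(RbdFioTest,
+#                       test_params={"image_size_mb": 256},
+#                       iterations=4, workers=2)
+#   runner.run()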
+
+TESTS: typing.Dict[str, typing.Type[RbdTest]] = {
+ 'RbdTest': RbdTest,
+ 'RbdFioTest': RbdFioTest,
+ 'RbdResizeFioTest': RbdResizeFioTest,
+ 'RbdStampTest': RbdStampTest,
+ # FS tests
+ 'RbdFsTest': RbdFsTest,
+ 'RbdFsFioTest': RbdFsFioTest,
+ 'RbdFsStampTest': RbdFsStampTest,
+}
+
+if __name__ == '__main__':
+ args = parser.parse_args()
+
+ log_level = logging.WARNING
+ if args.verbose:
+ log_level = logging.INFO
+ if args.debug:
+ log_level = logging.DEBUG
+ setup_logging(log_level)
+
+ test_params = dict(
+ image_size_mb=args.image_size_mb,
+ image_prefix=args.image_prefix,
+ bs=args.bs,
+ op=args.op,
+ verify=args.fio_verify,
+ iodepth=args.fio_depth,
+ map_timeout=args.map_timeout,
+ skip_enabling_disk=args.skip_enabling_disk,
+ )
+
+ try:
+ test_cls = TESTS[args.test_name]
+ except KeyError:
+ raise CephTestException("Unkown test: {}".format(args.test_name))
+
+ runner = TestRunner(
+ test_cls,
+ test_params=test_params,
+ iterations=args.iterations,
+ workers=args.concurrency,
+ stop_on_error=args.stop_on_error,
+ cleanup_on_error=not args.skip_cleanup_on_error)
+ runner.run()
+
+ Tracer.print_results()
+ test_cls.print_results(
+ description="count: %d, concurrency: %d" %
+ (args.iterations, args.concurrency))
+
+ assert runner.errors == 0, f"encountered {runner.errors} error(s)."